Diffstat (limited to 'ansible_collections/amazon/aws/plugins')
-rw-r--r--  ansible_collections/amazon/aws/plugins/action/s3_object.py  75
-rw-r--r--  ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py  71
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/aws.py  133
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py  45
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py  21
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py  19
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py  30
-rw-r--r--  ansible_collections/amazon/aws/plugins/doc_fragments/tags.py  62
-rw-r--r--  ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py  926
-rw-r--r--  ansible_collections/amazon/aws/plugins/inventory/aws_rds.py  403
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py  136
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_secret.py  295
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py  90
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py  286
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/_version.py  344
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/acm.py  222
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/arn.py  69
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/batch.py  58
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/botocore.py  357
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/cloud.py  213
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py  229
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/core.py  77
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py  89
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/ec2.py  310
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py  109
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/elbv2.py  1114
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/iam.py  75
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/modules.py  451
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/policy.py  179
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/rds.py  387
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/retries.py  78
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/route53.py  64
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/s3.py  102
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/tagging.py  181
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/tower.py  83
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/transformation.py  140
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/urls.py  238
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/version.py  18
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/waf.py  224
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/waiters.py  1265
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py  1962
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py  460
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/aws_az_info.py  186
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py  108
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudformation.py  794
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py  461
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudtrail.py  641
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py  238
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py  354
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py  323
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py  516
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py  351
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py  139
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py  218
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami.py  761
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py  283
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eip.py  666
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py  147
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni.py  875
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py  300
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance.py  2108
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py  587
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_key.py  339
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py  598
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py  1483
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py  305
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py  420
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py  295
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py  626
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py  300
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_tag.py  167
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py  73
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vol.py  862
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py  212
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py  537
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py  215
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py  482
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py  297
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py  179
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py  266
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py  176
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py  949
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py  215
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py  720
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py  269
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py  842
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py  279
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py  570
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py  223
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py  828
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py  343
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py  2147
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_policy.py  351
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py  209
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_user.py  580
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_user_info.py  199
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key.py  1000
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key_info.py  531
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda.py  803
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_alias.py  330
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_event.py  432
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_execute.py  285
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_info.py  538
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_policy.py  426
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster.py  1024
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py  309
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py  374
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance.py  1476
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py  424
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py  386
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_option_group.py  667
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py  328
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_param_group.py  341
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py  389
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py  374
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53.py  797
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53_health_check.py  650
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53_info.py  836
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53_zone.py  481
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_bucket.py  1184
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object.py  1286
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object_info.py  818
122 files changed, 54791 insertions(+), 0 deletions(-)
diff --git a/ansible_collections/amazon/aws/plugins/action/s3_object.py b/ansible_collections/amazon/aws/plugins/action/s3_object.py
new file mode 100644
index 00000000..a78dd0be
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/action/s3_object.py
@@ -0,0 +1,75 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2018, Will Thames <will@thames.id.au>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for s3_object operations
+
+ This adds the magic that means 'src' can point to both a 'remote' file
+ on the 'host' or in the 'files/' lookup path on the controller.
+ '''
+ self._supports_async = True
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+
+ try:
+ new_module_args = self._task.args.copy()
+ if source:
+ source = os.path.expanduser(source)
+
+ # For backward compatibility check if the file exists on the remote; it should take precedence
+ if not self._remote_file_exists(source):
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
+ new_module_args['src'] = source
+ except AnsibleFileNotFound:
+ # module handles error message for nonexistent files
+ new_module_args['src'] = source
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+ # execute the s3_object module with the updated args
+ result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ return result
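
The action plugin's src handling reduces to a small precedence rule: a path that already exists on the managed host wins, otherwise the controller's files/ search path is consulted. Below is a minimal standalone sketch of that rule, assuming hypothetical remote_file_exists and find_local_file callables in place of the plugin's _remote_file_exists and _find_needle helpers, and the built-in FileNotFoundError in place of AnsibleFileNotFound:

    import os

    def resolve_src(src, remote_file_exists, find_local_file):
        # Expand ~ first, as the plugin does.
        src = os.path.expanduser(src)
        # For backward compatibility, a file already on the remote takes precedence.
        if remote_file_exists(src):
            return src
        try:
            # Otherwise fall back to the controller-side 'files/' lookup.
            return find_local_file(src)
        except FileNotFoundError:
            # The module reports nonexistent files itself.
            return src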
diff --git a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
new file mode 100644
index 00000000..551a866a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
@@ -0,0 +1,71 @@
+# (C) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_resource_actions
+ type: aggregate
+ short_description: summarizes all "resource:actions" completed
+ description:
+ - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
+ AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
+ be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
+ requirements:
+ - whitelisting in configuration - see examples section below for details.
+'''
+
+EXAMPLES = '''
+example: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = aws_resource_actions
+sample output: >
+#
+# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
+# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
+# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
+#
+sample output: >
+#
+# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
+# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
+#
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils._text import to_native
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.8
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'amazon.aws.aws_resource_actions'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ self.aws_resource_actions = []
+ super(CallbackModule, self).__init__()
+
+ def extend_aws_resource_actions(self, result):
+ if result.get('resource_actions'):
+ self.aws_resource_actions.extend(result['resource_actions'])
+
+ def runner_on_ok(self, host, res):
+ self.extend_aws_resource_actions(res)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.extend_aws_resource_actions(res)
+
+ def v2_runner_item_on_ok(self, result):
+ self.extend_aws_resource_actions(result._result)
+
+ def v2_runner_item_on_failed(self, result):
+ self.extend_aws_resource_actions(result._result)
+
+ def playbook_on_stats(self, stats):
+ if self.aws_resource_actions:
+ self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
+ self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
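
Deduplication and ordering of the summary happen in playbook_on_stats(): repeated "resource:action" entries are collapsed via set() and then sorted. A standalone illustration with made-up action names:

    actions = ['ec2:DescribeVpcs', 'sts:GetCallerIdentity', 'ec2:DescribeVpcs']
    summary = sorted(str(action) for action in set(actions))
    print("AWS ACTIONS: {0}".format(summary))
    # AWS ACTIONS: ['ec2:DescribeVpcs', 'sts:GetCallerIdentity']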
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
new file mode 100644
index 00000000..1a3833ff
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Will Thames <will@thames.id.au>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # AWS only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ access_key:
+ description:
+ - AWS access key ID.
+ - See the AWS documentation for more information about access tokens
+ U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+ - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY)
+ environment variables may also be used in decreasing order of
+ preference.
+ - The I(aws_access_key) and I(profile) options are mutually exclusive.
+ - The I(aws_access_key_id) alias was added in release 5.1.0 for
+ consistency with the AWS botocore SDK.
+ - The I(ec2_access_key) alias has been deprecated and will be removed in a
+ release after 2024-12-01.
+ - Support for the C(EC2_ACCESS_KEY) environment variable has been
+ deprecated and will be removed in a release after 2024-12-01.
+ type: str
+ aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
+ secret_key:
+ description:
+ - AWS secret access key.
+ - See the AWS documentation for more information about access tokens
+ U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+ - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY)
+ environment variables may also be used in decreasing order of
+ preference.
+ - The I(secret_key) and I(profile) options are mutually exclusive.
+ - The I(aws_secret_access_key) alias was added in release 5.1.0 for
+ consistency with the AWS botocore SDK.
+ - The I(ec2_secret_key) alias has been deprecated and will be removed in a
+ release after 2024-12-01.
+ - Support for the C(EC2_SECRET_KEY) environment variable has been
+ deprecated and will be removed in a release after 2024-12-01.
+ type: str
+ aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
+ session_token:
+ description:
+ - AWS STS session token for use with temporary credentials.
+ - See the AWS documentation for more information about access tokens
+ U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+ - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN)
+ environment variables may also be used in decreasing order of preference.
+ - The I(security_token) and I(profile) options are mutually exclusive.
+ - Aliases I(aws_session_token) and I(session_token) were added in release
+ 3.2.0, with the parameter being renamed from I(security_token) to
+ I(session_token) in release 6.0.0.
+ - The I(security_token), I(aws_security_token), and I(access_token)
+ aliases have been deprecated and will be removed in a release after
+ 2024-12-01.
+ - Support for the C(EC2_SECURITY_TOKEN) and C(AWS_SECURITY_TOKEN) environment
+ variables has been deprecated and will be removed in a release after
+ 2024-12-01.
+ type: str
+ aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
+ profile:
+ description:
+ - A named AWS profile to use for authentication.
+ - See the AWS documentation for more information about named profiles
+ U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
+ - The C(AWS_PROFILE) environment variable may also be used.
+ - The I(profile) option is mutually exclusive with the I(aws_access_key),
+ I(aws_secret_key) and I(security_token) options.
+ type: str
+ aliases: ['aws_profile']
+
+ endpoint_url:
+ description:
+ - URL to connect to instead of the default AWS endpoints. While this
+ can be used to connect to other AWS-compatible services, the
+ amazon.aws and community.aws collections are only tested against
+ AWS.
+ - The C(AWS_URL) or C(EC2_URL) environment variables may also be used,
+ in decreasing order of preference.
+ - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be
+ removed in a release after 2024-12-01.
+ - Support for the C(EC2_URL) environment variable has been deprecated and
+ will be removed in a release after 2024-12-01.
+ type: str
+ aliases: ['ec2_url', 'aws_endpoint_url', 's3_url']
+ aws_ca_bundle:
+ description:
+ - The location of a CA Bundle to use when validating SSL certificates.
+ - The C(AWS_CA_BUNDLE) environment variable may also be used.
+ type: path
+ validate_certs:
+ description:
+ - When set to C(false), SSL certificates will not be validated for
+ communication with the AWS APIs.
+ - Setting I(validate_certs=false) is strongly discouraged, as an
+ alternative, consider setting I(aws_ca_bundle) instead.
+ type: bool
+ default: true
+ aws_config:
+ description:
+ - A dictionary to modify the botocore configuration.
+ - Parameters can be found in the AWS documentation
+ U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
+ type: dict
+ debug_botocore_endpoint_logs:
+ description:
+ - Use a C(botocore.endpoint) logger to parse the unique (rather than total)
+ C("resource:action") API calls made during a task, outputting the set to
+ the resource_actions key in the task results. Use the
+ C(aws_resource_actions) callback to output the total list made during
+ a playbook.
+ - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used.
+ type: bool
+ default: false
+notes:
+ - B(Caution:) For modules, environment variables and configuration files are
+ read from the Ansible 'host' context and not the 'controller' context.
+ As such, files may need to be explicitly copied to the 'host'. For lookup
+ and connection plugins, environment variables and configuration files are
+ read from the Ansible 'controller' context and not the 'host' context.
+ - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
+ and other settings, such as the region, from its configuration files in the
+ Ansible 'host' context (typically C(~/.aws/credentials)).
+ See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
+ for more information.
+'''
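
The fragment repeatedly describes environment variables consulted "in decreasing order of preference". One way to picture that resolution, as a sketch only (the first_env helper is ours, not the collection's):

    import os

    def first_env(*names):
        # Return the first set, non-empty variable among the ordered names.
        for name in names:
            value = os.environ.get(name)
            if value:
                return value
        return None

    access_key = first_env('AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY', 'EC2_ACCESS_KEY')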
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
new file mode 100644
index 00000000..73eff046
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Plugin options for AWS credentials
+ DOCUMENTATION = r'''
+options:
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+'''
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
new file mode 100644
index 00000000..52152660
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Plugin option for AWS region
+ DOCUMENTATION = r'''
+options:
+ region:
+ description: The region for which to create the connection.
+ type: str
+ env:
+ - name: EC2_REGION
+ - name: AWS_REGION
+'''
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
new file mode 100644
index 00000000..a88e2e01
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Minimum requirements for the collection
+ DOCUMENTATION = r'''
+options: {}
+requirements:
+ - python >= 3.6
+ - boto3 >= 1.18.0
+ - botocore >= 1.21.0
+'''
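
A quick way to verify an environment against these floors, assuming the third-party packaging library is installed (this check is illustrative, not part of the collection):

    import boto3
    import botocore
    from packaging.version import Version

    # Floors quoted in the fragment above.
    assert Version(boto3.__version__) >= Version('1.18.0')
    assert Version(botocore.__version__) >= Version('1.21.0')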
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
new file mode 100644
index 00000000..017652b5
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # EC2 only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ region:
+ description:
+ - The AWS region to use.
+ - For global services such as IAM, Route53 and CloudFront, I(region)
+ is ignored.
+ - The C(AWS_REGION) or C(EC2_REGION) environment variables may also
+ be used.
+ - See the Amazon AWS documentation for more information
+ U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
+ - The C(ec2_region) alias has been deprecated and will be removed in
+ a release after 2024-12-01.
+ - Support for the C(EC2_REGION) environment variable has been
+ deprecated and will be removed in a release after 2024-12-01.
+ type: str
+ aliases: [ aws_region, ec2_region ]
+'''
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
new file mode 100644
index 00000000..9d381cb8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Tagging related parameters
+ DOCUMENTATION = r'''
+options:
+ tags:
+ description:
+ - A dictionary representing the tags to be applied to the resource.
+ - If the I(tags) parameter is not set then tags will not be modified.
+ type: dict
+ required: false
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+ - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
+ from the resource to match exactly what is defined by the I(tags) parameter.
+ - If the I(tags) parameter is not set then tags will not be modified, even
+ if I(purge_tags=True).
+ - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
+ modified. As such they will be ignored for the purposes of the
+ I(purge_tags) parameter. See the Amazon documentation for more information
+ U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
+ type: bool
+ default: true
+ required: false
+'''
+
+ # Some modules had a default of purge_tags=False; this was generally
+ # deprecated in release 4.0.0.
+ DEPRECATED_PURGE = r'''
+options:
+ tags:
+ description:
+ - A dictionary representing the tags to be applied to the resource.
+ - If the I(tags) parameter is not set then tags will not be modified.
+ type: dict
+ required: false
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+ - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
+ from the resource to match exactly what is defined by the I(tags) parameter.
+ - If the I(tags) parameter is not set then tags will not be modified, even
+ if I(purge_tags=True).
+ - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
+ modified. As such they will be ignored for the purposes of the
+ I(purge_tags) parameter. See the Amazon documentation for more information
+ U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
+ - The current default value of C(False) has been deprecated. The default
+ value will change to C(True) in release 5.0.0.
+ type: bool
+ required: false
+'''
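
The purge semantics described in both fragments reduce to a simple diff between current and desired tags, with the reserved aws: namespace left untouched. A minimal sketch of that behavior (plan_tags is our name, not a collection helper):

    def plan_tags(current, desired, purge_tags=True):
        # Tags to create or update.
        to_set = {k: v for k, v in desired.items() if current.get(k) != v}
        # Tags to remove; keys in the reserved aws: namespace are never touched.
        to_unset = []
        if purge_tags:
            to_unset = [k for k in current
                        if k not in desired and not k.startswith('aws:')]
        return to_set, to_unset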
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
new file mode 100644
index 00000000..6452f003
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -0,0 +1,926 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_ec2
+short_description: EC2 inventory source
+extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ - amazon.aws.boto3
+ - amazon.aws.aws_credentials
+description:
+ - Get inventory hosts from Amazon Web Services EC2.
+ - Uses a YAML configuration file that ends with C(aws_ec2.{yml|yaml}).
+notes:
+ - If no credentials are provided and the control node has an associated IAM instance profile then the
+ role will be used for authentication.
+author:
+ - Sloane Hertel (@s-hertel)
+options:
+ plugin:
+ description: Token that ensures this is a source file for the plugin.
+ required: True
+ choices: ['aws_ec2', 'amazon.aws.aws_ec2']
+ iam_role_arn:
+ description:
+ - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+ credentials with enough privilege to perform the AssumeRole action.
+ regions:
+ description:
+ - A list of regions in which to describe EC2 instances.
+ - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+ type: list
+ elements: str
+ default: []
+ hostnames:
+ description:
+ - A list in order of precedence for hostname variables.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ name:
+ description:
+ - Name of the host.
+ - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+ - If the value provided does not exist in the above options, it will be used as a literal string.
+ type: str
+ required: True
+ prefix:
+ description:
+ - Prefix to prepend to I(name). Same options as I(name).
+ - If I(prefix) is specified, final hostname will be I(prefix) + I(separator) + I(name).
+ type: str
+ default: ''
+ required: False
+ separator:
+ description:
+ - Value to separate I(prefix) and I(name) when I(prefix) is specified.
+ type: str
+ default: '_'
+ required: False
+ allow_duplicated_hosts:
+ description:
+ - By default, the first name that matches an entry of the I(hostnames) list is returned.
+ - Turn this flag on if you don't mind having duplicated entries in the inventory
+ and you want to get all the hostnames that match.
+ type: bool
+ default: False
+ version_added: 5.0.0
+ filters:
+ description:
+ - A dictionary of filter value pairs.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ type: dict
+ default: {}
+ include_filters:
+ description:
+ - A list of filters. Any instances matching at least one of the filters are included in the result.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - Every entry in this list triggers a search query. As such, from a performance point of view, it's better to
+ keep the list as short as possible.
+ type: list
+ elements: dict
+ default: []
+ version_added: 1.5.0
+ exclude_filters:
+ description:
+ - A list of filters. Any instances matching one of the filters are excluded from the result.
+ - The filters from C(exclude_filters) take priority over the C(include_filters) and C(filters) keys.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - Every entry in this list triggers a search query. As such, from a performance point of view, it's better to
+ keep the list as short as possible.
+ type: list
+ elements: dict
+ default: []
+ version_added: 1.5.0
+ include_extra_api_calls:
+ description:
+ - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+ - Spot instances may be persistent and instances may have associated events.
+ - The I(include_extra_api_calls) option has been deprecated and will be removed in release 6.0.0.
+ type: bool
+ default: False
+ strict_permissions:
+ description:
+ - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+ - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+ type: bool
+ default: True
+ use_contrib_script_compatible_sanitization:
+ description:
+ - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
+ This option allows you to override that, in an effort to ease migration from the old inventory script, and
+ matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+ To replicate the behavior of ``replace_dash_in_groups = True`` with constructed groups,
+ you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+ - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+ otherwise the core engine will just apply the standard sanitization on top.
+ - This is not the default, as such names break certain functionality: not all characters are valid in Python identifiers,
+ which group names end up being used as.
+ type: bool
+ default: False
+ use_contrib_script_compatible_ec2_tag_keys:
+ description:
+ - Expose the host tags with ec2_tag_TAGNAME keys like the old ec2.py inventory script.
+ - The use of this feature is discouraged and we advise migrating to the new ``tags`` structure.
+ type: bool
+ default: False
+ version_added: 1.5.0
+ hostvars_prefix:
+ description:
+ - The prefix for host variable names coming from AWS.
+ type: str
+ version_added: 3.1.0
+ hostvars_suffix:
+ description:
+ - The suffix for host variable names coming from AWS.
+ type: str
+ version_added: 3.1.0
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: aws_ec2
+regions:
+ - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: aws_ec2
+# The values for profile, access key, secret key and token can be hardcoded like:
+boto_profile: aws_profile
+# or you could use Jinja as:
+# boto_profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}"
+# Populate inventory with instances in these regions
+regions:
+ - us-east-1
+ - us-east-2
+filters:
+ # All instances with their `Environment` tag set to `dev`
+ tag:Environment: dev
+ # All dev and QA hosts
+ tag:Environment:
+ - dev
+ - qa
+ instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
+hostnames:
+ - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+ - tag:CustomDNSName
+ - dns-name
+ - name: 'tag:Name=Tag1,Name=Tag2'
+ - name: 'private-ip-address'
+ separator: '_'
+ prefix: 'tag:Name'
+ - name: 'test_literal' # Using literal values for hostname
+ separator: '-' # Hostname will be aws-test_literal
+ prefix: 'aws'
+
+# Returns all the hostnames for a given instance
+allow_duplicated_hosts: False
+
+# Example using constructed features to create groups and set ansible_host
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+ # Add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'architecture'
+ # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+ - prefix: tag
+ key: tags
+ # Add hosts to e.g. instance_type_z3_tiny
+ - prefix: instance_type
+ key: instance_type
+ # Create security_groups_sg_abcd1234 group for each SG
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ # Create a group for each value of the Application tag
+ - key: tags.Application
+ separator: ''
+ # Create a group per region e.g. aws_region_us_east_2
+ - key: placement.region
+ prefix: aws_region
+ # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+ - key: tags['Role']
+ prefix: foo
+ parent_group: "project"
+# Set individual variables with compose
+compose:
+ # Use the private IP address to connect to the host
+ # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+ ansible_host: private_ip_address
+
+# Example using include_filters and exclude_filters to compose the inventory.
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+include_filters:
+- tag:Name:
+ - 'my_second_tag'
+- tag:Name:
+ - 'my_third_tag'
+exclude_filters:
+- tag:Name:
+ - 'my_first_tag'
+
+# Example using groups to assign the running hosts to a group based on vpc_id
+plugin: aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+ - us-east-2
+filters:
+ # All instances with their state as `running`
+ instance-state-name: running
+keyed_groups:
+ - prefix: tag
+ key: tags
+compose:
+ ansible_host: public_dns_name
+groups:
+ libvpc: vpc_id == 'vpc-####'
+# Define prefix and suffix for host variables coming from AWS.
+plugin: aws_ec2
+regions:
+ - us-east-1
+hostvars_prefix: 'aws_'
+hostvars_suffix: '_ec2'
+'''
+
+import re
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import missing_required_lib
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.inventory import Cacheable
+from ansible.plugins.inventory import Constructable
+from ansible.template import Templar
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+# The mappings give an array of keys to get from the filter name to the value
+# returned by boto3's EC2 describe_instances method.
+
+instance_meta_filter_to_boto_attr = {
+ 'group-id': ('Groups', 'GroupId'),
+ 'group-name': ('Groups', 'GroupName'),
+ 'network-interface.attachment.instance-owner-id': ('OwnerId',),
+ 'owner-id': ('OwnerId',),
+ 'requester-id': ('RequesterId',),
+ 'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+ 'affinity': ('Placement', 'Affinity'),
+ 'architecture': ('Architecture',),
+ 'availability-zone': ('Placement', 'AvailabilityZone'),
+ 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+ 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+ 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+ 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+ 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+ 'client-token': ('ClientToken',),
+ 'dns-name': ('PublicDnsName',),
+ 'host-id': ('Placement', 'HostId'),
+ 'hypervisor': ('Hypervisor',),
+ 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+ 'image-id': ('ImageId',),
+ 'instance-id': ('InstanceId',),
+ 'instance-lifecycle': ('InstanceLifecycle',),
+ 'instance-state-code': ('State', 'Code'),
+ 'instance-state-name': ('State', 'Name'),
+ 'instance-type': ('InstanceType',),
+ 'instance.group-id': ('SecurityGroups', 'GroupId'),
+ 'instance.group-name': ('SecurityGroups', 'GroupName'),
+ 'ip-address': ('PublicIpAddress',),
+ 'kernel-id': ('KernelId',),
+ 'key-name': ('KeyName',),
+ 'launch-index': ('AmiLaunchIndex',),
+ 'launch-time': ('LaunchTime',),
+ 'monitoring-state': ('Monitoring', 'State'),
+ 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+ 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+ 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+ 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+ 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+ 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+ 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+ 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+ 'network-interface.attachment.instance-id': ('InstanceId',),
+ 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+ 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+ 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+ 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+ 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+ 'network-interface.description': ('NetworkInterfaces', 'Description'),
+ 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+ 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+ 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+ 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+ 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+ 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+ 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+ # 'network-interface.requester-id': (),
+ 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.status': ('NetworkInterfaces', 'Status'),
+ 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+ 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+ 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+ 'placement-group-name': ('Placement', 'GroupName'),
+ 'platform': ('Platform',),
+ 'private-dns-name': ('PrivateDnsName',),
+ 'private-ip-address': ('PrivateIpAddress',),
+ 'product-code': ('ProductCodes', 'ProductCodeId'),
+ 'product-code.type': ('ProductCodes', 'ProductCodeType'),
+ 'ramdisk-id': ('RamdiskId',),
+ 'reason': ('StateTransitionReason',),
+ 'root-device-name': ('RootDeviceName',),
+ 'root-device-type': ('RootDeviceType',),
+ 'source-dest-check': ('SourceDestCheck',),
+ 'spot-instance-request-id': ('SpotInstanceRequestId',),
+ 'state-reason-code': ('StateReason', 'Code'),
+ 'state-reason-message': ('StateReason', 'Message'),
+ 'subnet-id': ('SubnetId',),
+ 'tag': ('Tags',),
+ 'tag-key': ('Tags',),
+ 'tag-value': ('Tags',),
+ 'tenancy': ('Placement', 'Tenancy'),
+ 'virtualization-type': ('VirtualizationType',),
+ 'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'amazon.aws.aws_ec2'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ self.group_prefix = 'aws_ec2_'
+
+ # credentials
+ self.boto_profile = None
+ self.aws_secret_access_key = None
+ self.aws_access_key_id = None
+ self.aws_security_token = None
+ self.iam_role_arn = None
+
+ def _compile_values(self, obj, attr):
+ '''
+ :param obj: A list or dict of instance attributes
+ :param attr: A key
+ :return The value(s) found via the attr
+ '''
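+ # Illustration (hypothetical data): _compile_values({'Groups': [{'GroupId': 'sg-1'}]}, 'Groups')
+ # returns {'GroupId': 'sg-1'}, because single-element lists are unwrapped below.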
+ if obj is None:
+ return
+
+ temp_obj = []
+
+ if isinstance(obj, list) or isinstance(obj, tuple):
+ for each in obj:
+ value = self._compile_values(each, attr)
+ if value:
+ temp_obj.append(value)
+ else:
+ temp_obj = obj.get(attr)
+
+ has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
+ if has_indexes and len(temp_obj) == 1:
+ return temp_obj[0]
+
+ return temp_obj
+
+ def _get_boto_attr_chain(self, filter_name, instance):
+ '''
+ :param filter_name: The filter
+ :param instance: instance dict returned by boto3 ec2 describe_instances()
+ '''
+ allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+
+ # If the filter is not in allowed_filters -> use it as a literal string
+ if filter_name not in allowed_filters:
+ return filter_name
+
+ if filter_name in instance_data_filter_to_boto_attr:
+ boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+ else:
+ boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+ instance_value = instance
+ for attribute in boto_attr_list:
+ instance_value = self._compile_values(instance_value, attribute)
+ return instance_value
+
+ def _get_credentials(self):
+ '''
+ :return A dictionary of boto client credentials
+ '''
+ boto_params = {}
+ for credential in (('aws_access_key_id', self.aws_access_key_id),
+ ('aws_secret_access_key', self.aws_secret_access_key),
+ ('aws_session_token', self.aws_security_token)):
+ if credential[1]:
+ boto_params[credential[0]] = credential[1]
+
+ return boto_params
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region=None):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+
+ credentials = self._get_credentials()
+ iam_role_arn = self.iam_role_arn
+
+ if not regions:
+ try:
+ # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+ client = self._get_connection(credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+ # the above can fail depending on the boto3 version; ignore it and let's try something else
+ pass
+ except is_boto3_error_code('UnauthorizedOperation') as e: # pylint: disable=duplicate-except
+ if iam_role_arn is not None:
+ try:
+ # Describe regions assuming arn role
+ assumed_credentials = self._boto3_assume_role(credentials)
+ client = self._get_connection(assumed_credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+ # the above can fail depending on the boto3 version; ignore it and let's try something else
+ pass
+ else:
+ raise AnsibleError("Unauthorized operation: %s" % to_native(e))
+
+ # fallback to local list hardcoded in boto3 if still no regions
+ if not regions:
+ session = boto3.Session()
+ regions = session.get_available_regions('ec2')
+
+ # I give up, now you MUST give me regions
+ if not regions:
+ raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+ for region in regions:
+ connection = self._get_connection(credentials, region)
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_instances_by_region(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions in which to describe instances
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ :return A list of instance dictionaries
+ '''
+ all_instances = []
+
+ for connection, _region in self._boto3_conn(regions):
+ try:
+ # By default find non-terminated/terminating instances
+ if not any(f['Name'] == 'instance-state-name' for f in filters):
+ filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+ paginator = connection.get_paginator('describe_instances')
+ reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+ instances = []
+ for r in reservations:
+ new_instances = r['Instances']
+ for instance in new_instances:
+ instance.update(self._get_reservation_details(r))
+ instances.extend(new_instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+ instances = []
+ else:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+ except botocore.exceptions.BotoCoreError as e:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+ all_instances.extend(instances)
+
+ return all_instances
+
+ def _get_reservation_details(self, reservation):
+ return {
+ 'OwnerId': reservation['OwnerId'],
+ 'RequesterId': reservation.get('RequesterId', ''),
+ 'ReservationId': reservation['ReservationId']
+ }
+
+ @classmethod
+ def _get_tag_hostname(cls, preference, instance):
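+ # Illustration (hypothetical values): preference 'tag:Name=web,Env' with
+ # tags {'Name': 'web', 'Env': 'prod'} returns ['Name_web', 'prod'].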
+ tag_hostnames = preference.split('tag:', 1)[1]
+ if ',' in tag_hostnames:
+ tag_hostnames = tag_hostnames.split(',')
+ else:
+ tag_hostnames = [tag_hostnames]
+
+ tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+ tag_values = []
+ for v in tag_hostnames:
+ if '=' in v:
+ tag_name, tag_value = v.split('=')
+ if tags.get(tag_name) == tag_value:
+ tag_values.append(to_text(tag_name) + "_" + to_text(tag_value))
+ else:
+ tag_value = tags.get(v)
+ if tag_value:
+ tag_values.append(to_text(tag_value))
+ return tag_values
+
+ def _sanitize_hostname(self, hostname):
+ if ':' in to_text(hostname):
+ return self._sanitize_group_name(to_text(hostname))
+ else:
+ return to_text(hostname)
+
+ def _get_preferred_hostname(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables in order of preference
+ :return the preferred identifier for the host
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ for preference in hostnames:
+ if isinstance(preference, dict):
+ if 'name' not in preference:
+ raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+ hostname = self._get_preferred_hostname(instance, [preference["name"]])
+ hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
+ separator = preference.get("separator", "_")
+ if hostname and hostname_from_prefix and 'prefix' in preference:
+ hostname = hostname_from_prefix + separator + hostname
+ elif preference.startswith('tag:'):
+ tags = self._get_tag_hostname(preference, instance)
+ hostname = tags[0] if tags else None
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+ if hostname:
+ break
+ if hostname:
+ return self._sanitize_hostname(hostname)
+
+ def get_all_hostnames(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables
+ :return all the candidate hostnames matching the expectations
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ hostname_list = []
+ for preference in hostnames:
+ if isinstance(preference, dict):
+ if 'name' not in preference:
+ raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+ hostname = self.get_all_hostnames(instance, [preference["name"]])
+ hostname_from_prefix = self.get_all_hostnames(instance, [preference["prefix"]])
+ separator = preference.get("separator", "_")
+ if hostname and hostname_from_prefix and 'prefix' in preference:
+ hostname = hostname_from_prefix[0] + separator + hostname[0]
+ elif preference.startswith('tag:'):
+ hostname = self._get_tag_hostname(preference, instance)
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+
+ if hostname:
+ if isinstance(hostname, list):
+ for host in hostname:
+ hostname_list.append(self._sanitize_hostname(host))
+ elif isinstance(hostname, str):
+ hostname_list.append(self._sanitize_hostname(hostname))
+
+ return hostname_list
+
+ def _query(self, regions, include_filters, exclude_filters, strict_permissions):
+ '''
+ :param regions: a list of regions to query
+ :param include_filters: a list of boto3 filter dictionaries
+ :param exclude_filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+
+ '''
+ instances = []
+ ids_to_ignore = []
+ for filter_dict in exclude_filters:
+ for i in self._get_instances_by_region(
+ regions,
+ ansible_dict_to_boto3_filter_list(filter_dict),
+ strict_permissions):
+ ids_to_ignore.append(i['InstanceId'])
+ for filter_dict in include_filters:
+ for i in self._get_instances_by_region(
+ regions,
+ ansible_dict_to_boto3_filter_list(filter_dict),
+ strict_permissions):
+ if i['InstanceId'] not in ids_to_ignore:
+ instances.append(i)
+ ids_to_ignore.append(i['InstanceId'])
+
+ instances = sorted(instances, key=lambda x: x['InstanceId'])
+
+ return {'aws_ec2': instances}
+
+ def _populate(self, groups, hostnames, allow_duplicated_hosts=False,
+ hostvars_prefix=None, hostvars_suffix=None,
+ use_contrib_script_compatible_ec2_tag_keys=False):
+ for group in groups:
+ group = self.inventory.add_group(group)
+ self._add_hosts(
+ hosts=groups[group],
+ group=group,
+ hostnames=hostnames,
+ allow_duplicated_hosts=allow_duplicated_hosts,
+ hostvars_prefix=hostvars_prefix,
+ hostvars_suffix=hostvars_suffix,
+ use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
+ self.inventory.add_child('all', group)
+
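+ # Illustrative transformation (added note, hypothetical values): calling
+ # prepare_host_vars({'InstanceId': 'i-123',
+ # 'Placement': {'AvailabilityZone': 'us-east-1a'}},
+ # hostvars_prefix='aws_')
+ # returns {'aws_instance_id': 'i-123', 'aws_tags': {},
+ # 'aws_placement': {'availability_zone': 'us-east-1a', 'region': 'us-east-1'}}.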
+ @classmethod
+ def prepare_host_vars(cls, original_host_vars, hostvars_prefix=None, hostvars_suffix=None,
+ use_contrib_script_compatible_ec2_tag_keys=False):
+ host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=['Tags'])
+ host_vars['tags'] = boto3_tag_list_to_ansible_dict(original_host_vars.get('Tags', []))
+
+ # Allow easier grouping by region
+ host_vars['placement']['region'] = host_vars['placement']['availability_zone'][:-1]
+
+ if use_contrib_script_compatible_ec2_tag_keys:
+ for k, v in host_vars['tags'].items():
+ host_vars["ec2_tag_%s" % k] = v
+
+ if hostvars_prefix or hostvars_suffix:
+ for hostvar, hostval in host_vars.copy().items():
+ del host_vars[hostvar]
+ if hostvars_prefix:
+ hostvar = hostvars_prefix + hostvar
+ if hostvars_suffix:
+ hostvar = hostvar + hostvars_suffix
+ host_vars[hostvar] = hostval
+
+ return host_vars
+
+ def iter_entry(self, hosts, hostnames, allow_duplicated_hosts=False, hostvars_prefix=None,
+ hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False):
+ for host in hosts:
+ if allow_duplicated_hosts:
+ hostname_list = self.get_all_hostnames(host, hostnames)
+ else:
+ hostname_list = [self._get_preferred_hostname(host, hostnames)]
+ if not hostname_list or hostname_list[0] is None:
+ continue
+
+ host_vars = self.prepare_host_vars(
+ host,
+ hostvars_prefix,
+ hostvars_suffix,
+ use_contrib_script_compatible_ec2_tag_keys)
+ for name in hostname_list:
+ yield to_text(name), host_vars
+
+ def _add_hosts(self, hosts, group, hostnames, allow_duplicated_hosts=False,
+ hostvars_prefix=None, hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ :param hostnames: a list of hostname destination variables in order of preference
+ :param bool allow_duplicated_hosts: if true, accept same host with different names
+ :param str hostvars_prefix: starts the hostvars variable name with this prefix
+ :param str hostvars_suffix: ends the hostvars variable name with this suffix
+ :param bool use_contrib_script_compatible_ec2_tag_keys: expose the instance tags as ec2_tag_<key> hostvars, matching the legacy inventory script
+ '''
+
+ for name, host_vars in self.iter_entry(
+ hosts, hostnames,
+ allow_duplicated_hosts=allow_duplicated_hosts,
+ hostvars_prefix=hostvars_prefix,
+ hostvars_suffix=hostvars_suffix,
+ use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys):
+ self.inventory.add_host(name, group=group)
+ for k, v in host_vars.items():
+ self.inventory.set_variable(name, k, v)
+
+ # Use constructed if applicable
+
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host_vars, name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host_vars, name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, name, strict=strict)
+
+ def _set_credentials(self, loader):
+ '''
+ :param loader: an ansible.parsing.dataloader.DataLoader object used to template the credential options
+ '''
+
+ t = Templar(loader=loader)
+ credentials = {}
+
+ for credential_type in ['aws_profile', 'aws_access_key', 'aws_secret_key', 'aws_security_token', 'iam_role_arn']:
+ if t.is_template(self.get_option(credential_type)):
+ credentials[credential_type] = t.template(variable=self.get_option(credential_type), disable_lookups=False)
+ else:
+ credentials[credential_type] = self.get_option(credential_type)
+
+ self.boto_profile = credentials['aws_profile']
+ self.aws_access_key_id = credentials['aws_access_key']
+ self.aws_secret_access_key = credentials['aws_secret_key']
+ self.aws_security_token = credentials['aws_security_token']
+ self.iam_role_arn = credentials['iam_role_arn']
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ session = botocore.session.get_session()
+ try:
+ credentials = session.get_credentials().get_frozen_credentials()
+ except AttributeError:
+ pass
+ else:
+ self.aws_access_key_id = credentials.access_key
+ self.aws_secret_access_key = credentials.secret_key
+ self.aws_security_token = credentials.token
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ def verify_file(self, path):
+ '''
+ :param path: the path to the inventory config file
+ :return: True if the file name ends with 'aws_ec2.yml' or 'aws_ec2.yaml', else False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+ return True
+ self.display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+ return False
+
+ def build_include_filters(self):
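+ # Descriptive note (added): the legacy 'filters' option is treated as the
+ # first include filter; with no filters configured, the single empty dict
+ # returned below matches every instance in the queried regions.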
+ if self.get_option('filters'):
+ return [self.get_option('filters')] + self.get_option('include_filters')
+ elif self.get_option('include_filters'):
+ return self.get_option('include_filters')
+ else: # no filter
+ return [{}]
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+ self._read_config_data(path)
+
+ if self.get_option('use_contrib_script_compatible_sanitization'):
+ self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+ self._set_credentials(loader)
+
+ # get user specifications
+ regions = self.get_option('regions')
+ include_filters = self.build_include_filters()
+ exclude_filters = self.get_option('exclude_filters')
+ hostnames = self.get_option('hostnames')
+ strict_permissions = self.get_option('strict_permissions')
+ allow_duplicated_hosts = self.get_option('allow_duplicated_hosts')
+
+ hostvars_prefix = self.get_option("hostvars_prefix")
+ hostvars_suffix = self.get_option("hostvars_suffix")
+ use_contrib_script_compatible_ec2_tag_keys = self.get_option('use_contrib_script_compatible_ec2_tag_keys')
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ if self.get_option('include_extra_api_calls'):
+ self.display.deprecate(
+ "The include_extra_api_calls option has been deprecated "
+ " and will be removed in release 6.0.0.",
+ date='2024-09-01', collection_name='amazon.aws')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query(regions, include_filters, exclude_filters, strict_permissions)
+
+ self._populate(
+ results,
+ hostnames,
+ allow_duplicated_hosts=allow_duplicated_hosts,
+ hostvars_prefix=hostvars_prefix,
+ hostvars_suffix=hostvars_suffix,
+ use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+
+ @staticmethod
+ def _legacy_script_compatible_group_sanitization(name):
+
+ # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
+ regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+ return regex.sub('_', name)
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
new file mode 100644
index 00000000..02f86073
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_rds
+short_description: RDS instance inventory source
+description:
+ - Get instances and clusters from Amazon Web Services RDS.
+ - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).
+options:
+ regions:
+ description:
+ - A list of regions in which to describe RDS instances and clusters. Available regions are listed here
+ U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
+ default: []
+ filters:
+ description:
+ - A dictionary of filter value pairs. Available filters are listed here
+ U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
+ C(db-cluster-id) and I(include_clusters) is True, the filter is applied to clusters as well.
+ default: {}
+ strict_permissions:
+ description:
+ - By default, if an AccessDenied exception is encountered this plugin will fail. You can set I(strict_permissions) to
+ False in the inventory config file, which allows the restricted resources to be gracefully skipped.
+ type: bool
+ default: True
+ include_clusters:
+ description: Whether or not to query for Aurora clusters as well as instances.
+ type: bool
+ default: False
+ statuses:
+ description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
+ type: list
+ elements: str
+ default:
+ - creating
+ - available
+ iam_role_arn:
+ description:
+ - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide
+ AWS credentials with enough privilege to perform the AssumeRole action.
+ hostvars_prefix:
+ description:
+ - The prefix for host variable names coming from AWS.
+ type: str
+ version_added: 3.1.0
+ hostvars_suffix:
+ description:
+ - The suffix for host variable names coming from AWS.
+ type: str
+ version_added: 3.1.0
+notes:
+ - Ansible versions prior to 2.10 should use the fully qualified plugin name 'amazon.aws.aws_rds'.
+extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ - amazon.aws.boto3
+ - amazon.aws.aws_credentials
+author:
+ - Sloane Hertel (@s-hertel)
+'''
+
+EXAMPLES = '''
+plugin: aws_rds
+regions:
+ - us-east-1
+ - ca-central-1
+keyed_groups:
+ - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
+ prefix: rds_parameter_group
+ - key: engine
+ prefix: rds
+ - key: tags
+ - key: region
+hostvars_prefix: aws_
+hostvars_suffix: _rds
+'''
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.inventory import Cacheable
+from ansible.plugins.inventory import Constructable
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'amazon.aws.aws_rds'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+ self.credentials = {}
+ self.boto_profile = None
+ self.iam_role_arn = None
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+ iam_role_arn = self.iam_role_arn
+ credentials = self.credentials
+ for region in regions:
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_hosts_by_region(self, connection, filters, strict):
+
+ def _add_tags_for_hosts(connection, hosts, strict):
+ for host in hosts:
+ if 'DBInstanceArn' in host:
+ resource_arn = host['DBInstanceArn']
+ else:
+ resource_arn = host['DBClusterArn']
+
+ try:
+ tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
+ except is_boto3_error_code('AccessDenied') as e:
+ if not strict:
+ tags = []
+ else:
+ raise e
+ host['Tags'] = tags
+
+ def wrapper(f, *args, **kwargs):
+ try:
+ results = f(*args, **kwargs)
+ if 'DBInstances' in results:
+ results = results['DBInstances']
+ else:
+ results = results['DBClusters']
+ _add_tags_for_hosts(connection, results, strict)
+ except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
+ if not strict:
+ results = []
+ else:
+ raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
+ return results
+ return wrapper
+
+ def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):
+ '''
+ :param regions: a list of regions in which to describe hosts
+ :param instance_filters: a list of boto3 filter dictionaries
+ :param cluster_filters: a list of boto3 filter dictionaries
+ :param strict: a boolean determining whether to fail or ignore 403 error codes
+ :param statuses: a list of statuses that the returned hosts should match
+ :param gather_clusters: whether to also describe Aurora DB clusters
+ :return: A list of host dictionaries
+ '''
+ all_instances = []
+ all_clusters = []
+ for connection, _region in self._boto3_conn(regions):
+ paginator = connection.get_paginator('describe_db_instances')
+ all_instances.extend(
+ self._get_hosts_by_region(connection, instance_filters, strict)
+ (paginator.paginate(Filters=instance_filters).build_full_result)
+ )
+ if gather_clusters:
+ all_clusters.extend(
+ self._get_hosts_by_region(connection, cluster_filters, strict)
+ (connection.describe_db_clusters, **{'Filters': cluster_filters})
+ )
+ sorted_hosts = list(
+ sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +
+ sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])
+ )
+ return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)
+
+ def find_hosts_with_valid_statuses(self, hosts, statuses):
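+ # DB instances report their state as 'DBInstanceStatus', while Aurora
+ # clusters use 'Status'; both variants are accepted below.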
+ if 'all' in statuses:
+ return hosts
+ valid_hosts = []
+ for host in hosts:
+ if host.get('DBInstanceStatus') in statuses:
+ valid_hosts.append(host)
+ elif host.get('Status') in statuses:
+ valid_hosts.append(host)
+ return valid_hosts
+
+ def _populate(self, hosts):
+ group = 'aws_rds'
+ self.inventory.add_group(group)
+ if hosts:
+ self._add_hosts(hosts=hosts, group=group)
+ self.inventory.add_child('all', group)
+
+ def _populate_from_source(self, source_data):
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+
+ def _get_hostname(self, host):
+ if host.get('DBInstanceIdentifier'):
+ return host['DBInstanceIdentifier']
+ else:
+ return host['DBClusterIdentifier']
+
+ def _format_inventory(self, hosts):
+ results = {'_meta': {'hostvars': {}}}
+ group = 'aws_rds'
+ results[group] = {'hosts': []}
+ for host in hosts:
+ hostname = self._get_hostname(host)
+ results[group]['hosts'].append(hostname)
+ h = self.inventory.get_host(hostname)
+ results['_meta']['hostvars'][h.name] = h.vars
+ return results
+
+ def _add_hosts(self, hosts, group):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host)
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
+ if 'availability_zone' in host:
+ host['region'] = host['availability_zone'][:-1]
+ elif 'availability_zones' in host:
+ host['region'] = host['availability_zones'][0][:-1]
+
+ self.inventory.add_host(hostname, group=group)
+ hostvars_prefix = self.get_option("hostvars_prefix")
+ hostvars_suffix = self.get_option("hostvars_suffix")
+ new_vars = dict()
+ for hostvar, hostval in host.items():
+ if hostvars_prefix:
+ hostvar = hostvars_prefix + hostvar
+ if hostvars_suffix:
+ hostvar = hostvar + hostvars_suffix
+ new_vars[hostvar] = hostval
+ self.inventory.set_variable(hostname, hostvar, hostval)
+ host.update(new_vars)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def _set_credentials(self):
+ '''
+ Set credentials from the inventory configuration, falling back to the ambient botocore session.
+ '''
+ self.boto_profile = self.get_option('aws_profile')
+ aws_access_key_id = self.get_option('aws_access_key')
+ aws_secret_access_key = self.get_option('aws_secret_key')
+ aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
+ session = botocore.session.get_session()
+ if session.get_credentials() is not None:
+ aws_access_key_id = session.get_credentials().access_key
+ aws_secret_access_key = session.get_credentials().secret_key
+ aws_security_token = session.get_credentials().token
+
+ if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ if aws_access_key_id:
+ self.credentials['aws_access_key_id'] = aws_access_key_id
+ if aws_secret_access_key:
+ self.credentials['aws_secret_access_key'] = aws_secret_access_key
+ if aws_security_token:
+ self.credentials['aws_session_token'] = aws_security_token
+
+ def verify_file(self, path):
+ '''
+ :param path: the path to the inventory config file
+ :return: True if the file name ends with 'aws_rds.yml' or 'aws_rds.yaml', else False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):
+ return True
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+ self._read_config_data(path)
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = self.get_option('filters')
+ strict_permissions = self.get_option('strict_permissions')
+ statuses = self.get_option('statuses')
+ include_clusters = self.get_option('include_clusters')
+ instance_filters = ansible_dict_to_boto3_filter_list(filters)
+ cluster_filters = []
+ if 'db-cluster-id' in filters and include_clusters:
+ cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ formatted_inventory = {}
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+ else:
+ self._populate_from_source(results)
+
+ if not cache or cache_needs_update:
+ results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)
+ self._populate(results)
+ formatted_inventory = self._format_inventory(results)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = formatted_inventory
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py
new file mode 100644
index 00000000..b04731f1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py
@@ -0,0 +1,136 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_account_attribute
+author:
+ - Sloane Hertel (@s-hertel) <shertel@redhat.com>
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.aws_credentials
+ - amazon.aws.aws_region
+short_description: Look up AWS account attributes
+description:
+ - Describes attributes of your AWS account. You can specify one of the listed
+ attribute choices or omit it to see all attributes.
+options:
+ attribute:
+ description: The attribute for which to get the value(s).
+ choices:
+ - supported-platforms
+ - default-vpc
+ - max-instances
+ - vpc-max-security-groups-per-interface
+ - max-elastic-ips
+ - vpc-max-elastic-ips
+ - has-ec2-classic
+'''
+
+EXAMPLES = """
+vars:
+ has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}"
+ # true | false
+
+ default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}"
+ # vpc-xxxxxxxx | none
+
+ account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}"
+ # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'],
+ # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']}
+
+"""
+
+RETURN = """
+_raw:
+ description:
+ Returns a boolean when I(attribute) is C(has-ec2-classic). Otherwise returns the value(s) of the attribute
+ (or all attributes if one is not specified).
+"""
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.plugins.lookup import LookupBase
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+
+def _boto3_conn(region, credentials):
+ boto_profile = credentials.pop('aws_profile', None)
+
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ if boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ raise AnsibleError("Insufficient credentials found.")
+ else:
+ raise AnsibleError("Insufficient credentials found.")
+ return connection
+
+
+def _get_credentials(options):
+ credentials = {}
+ credentials['aws_profile'] = options['aws_profile']
+ credentials['aws_secret_access_key'] = options['aws_secret_key']
+ credentials['aws_access_key_id'] = options['aws_access_key']
+ if options['aws_security_token']:
+ credentials['aws_session_token'] = options['aws_security_token']
+
+ return credentials
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_account_attributes(client, **params):
+ return client.describe_account_attributes(**params)
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+ self.set_options(var_options=variables, direct=kwargs)
+ boto_credentials = _get_credentials(self._options)
+
+ region = self._options['region']
+ client = _boto3_conn(region, boto_credentials)
+
+ attribute = kwargs.get('attribute')
+ params = {'AttributeNames': []}
+ check_ec2_classic = False
+ if attribute == 'has-ec2-classic':
+ check_ec2_classic = True
+ params['AttributeNames'] = ['supported-platforms']
+ elif attribute:
+ params['AttributeNames'] = [attribute]
+
+ try:
+ response = _describe_account_attributes(client, **params)['AccountAttributes']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise AnsibleError("Failed to describe account attributes: %s" % to_native(e))
+
+ if check_ec2_classic:
+ attr = response[0]
+ return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues'])
+
+ if attribute:
+ attr = response[0]
+ return [value['AttributeValue'] for value in attr['AttributeValues']]
+
+ flattened = {}
+ for k_v_dict in response:
+ flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']]
+ return flattened
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
new file mode 100644
index 00000000..7cfd5b51
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
@@ -0,0 +1,295 @@
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+name: aws_secret
+author:
+ - Aaron Smith (!UNKNOWN) <ajsmith10381@gmail.com>
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.aws_credentials
+ - amazon.aws.aws_region
+
+short_description: Look up secrets stored in AWS Secrets Manager
+description:
+ - Look up secrets stored in AWS Secrets Manager provided the caller
+ has the appropriate permissions to read the secret.
+ - Lookup is based on the secret's I(Name) value.
+ - Optional parameters can be passed into this lookup: I(version_id) and I(version_stage).
+options:
+ _terms:
+ description: Name of the secret to look up in AWS Secrets Manager.
+ required: True
+ bypath:
+ description: A boolean to indicate whether the secret is provided as a hierarchy.
+ default: false
+ type: boolean
+ version_added: 1.4.0
+ nested:
+ description: A boolean to indicate the secret contains nested values.
+ type: boolean
+ default: false
+ version_added: 1.4.0
+ version_id:
+ description: Version of the secret(s).
+ required: False
+ version_stage:
+ description: Stage of the secret version.
+ required: False
+ join:
+ description:
+ - Join two or more entries to form an extended secret.
+ - This is useful for overcoming the 4096 character limit imposed by AWS.
+ - No effect when used with I(bypath).
+ type: boolean
+ default: false
+ on_deleted:
+ description:
+ - Action to take if the secret has been marked for deletion.
+ - C(error) will raise a fatal error when the secret has been marked for deletion.
+ - C(skip) will silently ignore the deleted secret.
+ - C(warn) will skip over the deleted secret but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ version_added: 2.0.0
+ on_missing:
+ description:
+ - Action to take if the secret is missing.
+ - C(error) will raise a fatal error when the secret is missing.
+ - C(skip) will silently ignore the missing secret.
+ - C(warn) will skip over the missing secret but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ on_denied:
+ description:
+ - Action to take if access to the secret is denied.
+ - C(error) will raise a fatal error when access to the secret is denied.
+ - C(skip) will silently ignore the denied secret.
+ - C(warn) will skip over the denied secret but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+'''
+
+EXAMPLES = r"""
+ - name: lookup secretsmanager secret in the current region
+ debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}"
+
+ - name: Create RDS instance with aws_secret lookup for password param
+ rds:
+ command: create
+ instance_name: app-db
+ db_engine: MySQL
+ size: 10
+ instance_type: db.m1.small
+ username: dbadmin
+ password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}"
+ tags:
+ Environment: staging
+
+ - name: skip if secret does not exist
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}"
+
+ - name: warn if access to the secret is denied
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}"
+
+ - name: lookup secretsmanager secret in the current region using the nested feature
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}"
+ # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`.
+ # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`.
+ - name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature
+ debug: >
+ msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, aws_profile=aws_profile,
+ aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, nested=true) }}"
+ # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`.
+ # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`.
+ # Region is the AWS region where the AWS secret is stored.
+ # AWS_profile is the aws profile to use, that has access to the AWS secret.
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ Returns the value of the secret stored in AWS Secrets Manager.
+"""
+
+import json
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.plugins.lookup import LookupBase
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+
+def _boto3_conn(region, credentials):
+ boto_profile = credentials.pop('aws_profile', None)
+
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ if boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ raise AnsibleError("Insufficient credentials found.")
+ else:
+ raise AnsibleError("Insufficient credentials found.")
+ return connection
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
+ aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
+ bypath=False, nested=False, join=False, version_stage=None, version_id=None, on_missing='error',
+ on_denied='error', on_deleted='error'):
+ '''
+ :arg terms: a list of lookups to run.
+ e.g. ['parameter_name', 'parameter_name_too' ]
+ :kwarg variables: ansible variables active at the time of the lookup
+ :kwarg aws_access_key: identity of the AWS key to use
+ :kwarg aws_secret_key: AWS secret key (matching identity)
+ :kwarg aws_security_token: AWS session token if using STS
+ :kwarg region: AWS region in which to do the lookup
+ :kwarg bypath: Set to True to do a lookup of variables under a path
+ :kwarg nested: Set to True to do a lookup of nested secrets
+ :kwarg join: Join two or more entries to form an extended secret
+ :kwarg version_stage: Stage of the secret version
+ :kwarg version_id: Version of the secret(s)
+ :kwarg on_missing: Action to take if the secret is missing
+ :kwarg on_deleted: Action to take if the secret is marked for deletion
+ :kwarg on_denied: Action to take if access to the secret is denied
+ :returns: A list of parameter values or a list of dictionaries if bypath=True.
+ '''
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+ deleted = on_deleted.lower()
+ if not isinstance(deleted, string_types) or deleted not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_deleted" must be a string and one of "error", "warn" or "skip", not %s' % deleted)
+
+ missing = on_missing.lower()
+ if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing)
+
+ denied = on_denied.lower()
+ if not isinstance(denied, string_types) or denied not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % denied)
+
+ credentials = {}
+ if aws_profile:
+ credentials['aws_profile'] = aws_profile
+ else:
+ credentials['aws_profile'] = boto_profile
+ credentials['aws_secret_access_key'] = aws_secret_key
+ credentials['aws_access_key_id'] = aws_access_key
+ credentials['aws_session_token'] = aws_security_token
+
+ # fallback to IAM role credentials
+ if not credentials['aws_profile'] and not (
+ credentials['aws_access_key_id'] and credentials['aws_secret_access_key']):
+ session = botocore.session.get_session()
+ if session.get_credentials() is not None:
+ credentials['aws_access_key_id'] = session.get_credentials().access_key
+ credentials['aws_secret_access_key'] = session.get_credentials().secret_key
+ credentials['aws_session_token'] = session.get_credentials().token
+
+ client = _boto3_conn(region, credentials)
+
+ if bypath:
+ secrets = {}
+ for term in terms:
+ try:
+ paginator = client.get_paginator('list_secrets')
+ paginator_response = paginator.paginate(
+ Filters=[{'Key': 'name', 'Values': [term]}])
+ for page in paginator_response:
+ for secret_obj in page.get('SecretList', []):
+ secrets.update({secret_obj['Name']: self.get_secret_value(
+ secret_obj['Name'], client, on_missing=missing, on_denied=denied)})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
+ # wrap the collected name/value mapping once, after all terms are
+ # processed, so multiple terms accumulate into a single dictionary
+ secrets = [secrets]
+ else:
+ secrets = []
+ for term in terms:
+ value = self.get_secret_value(term, client,
+ version_stage=version_stage, version_id=version_id,
+ on_missing=missing, on_denied=denied, on_deleted=deleted,
+ nested=nested)
+ if value:
+ secrets.append(value)
+ if join:
+ joined_secret = []
+ joined_secret.append(''.join(secrets))
+ return joined_secret
+
+ return secrets
+
+ def get_secret_value(self, term, client, version_stage=None, version_id=None, on_missing=None, on_denied=None, on_deleted=None, nested=False):
+ params = {}
+ params['SecretId'] = term
+ if version_id:
+ params['VersionId'] = version_id
+ if version_stage:
+ params['VersionStage'] = version_stage
+ if nested:
+ if len(term.split('.')) < 2:
+ raise AnsibleError("Nested query must use the following syntax: `aws_secret_name.<key_name>.<key_name>")
+ secret_name = term.split('.')[0]
+ params['SecretId'] = secret_name
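+ # e.g. (illustrative) the term "myapp.prod.db_password" queries SecretId
+ # "myapp" and then walks the keys ["prod", "db_password"] inside the
+ # JSON-decoded SecretString.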
+
+ try:
+ response = client.get_secret_value(**params)
+ if 'SecretBinary' in response:
+ return response['SecretBinary']
+ if 'SecretString' in response:
+ if nested:
+ query = term.split('.')[1:]
+ secret_string = json.loads(response['SecretString'])
+ ret_val = secret_string
+ for key in query:
+ if key in ret_val:
+ ret_val = ret_val[key]
+ else:
+ raise AnsibleError("Successfully retrieved secret but there exists no key {0} in the secret".format(key))
+ return str(ret_val)
+ else:
+ return response['SecretString']
+ except is_boto3_error_message('marked for deletion'):
+ if on_deleted == 'error':
+ raise AnsibleError("Failed to find secret %s (marked for deletion)" % term)
+ elif on_deleted == 'warn':
+ self._display.warning('Skipping, did not find secret (marked for deletion) %s' % term)
+ except is_boto3_error_code('ResourceNotFoundException'): # pylint: disable=duplicate-except
+ if on_missing == 'error':
+ raise AnsibleError("Failed to find secret %s (ResourceNotFound)" % term)
+ elif on_missing == 'warn':
+ self._display.warning('Skipping, did not find secret %s' % term)
+ except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except
+ if on_denied == 'error':
+ raise AnsibleError("Failed to access secret %s (AccessDenied)" % term)
+ elif on_denied == 'warn':
+ self._display.warning('Skipping, access denied for secret %s' % term)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
+
+ return None
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
new file mode 100644
index 00000000..bd34d1bd
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
@@ -0,0 +1,90 @@
+# (c) 2016 James Turner <turnerjsm@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_service_ip_ranges
+author:
+ - James Turner (!UNKNOWN) <turnerjsm@gmail.com>
+requirements:
+ - must have public internet connectivity
+short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
+description:
+ - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
+ - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
+options:
+ service:
+ description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
+ region:
+ description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
+ ipv6_prefixes:
+ description: 'When I(ipv6_prefixes=True) the lookup will return IPv6 prefixes instead of IPv4 prefixes'
+ version_added: 2.1.0
+'''
+
+EXAMPLES = """
+vars:
+ ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
+tasks:
+
+- name: "use list return option and iterate as a loop"
+ debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
+# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
+
+- name: "Pull S3 IP ranges, and print the default return style"
+ debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
+# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
+"""
+
+RETURN = """
+_raw:
+ description: comma-separated list of CIDR ranges
+"""
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.six.moves.urllib.error import URLError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.urls import SSLValidationError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ if "ipv6_prefixes" in kwargs and kwargs["ipv6_prefixes"]:
+ prefixes_label = "ipv6_prefixes"
+ ip_prefix_label = "ipv6_prefix"
+ else:
+ prefixes_label = "prefixes"
+ ip_prefix_label = "ip_prefix"
+
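+ # Entries in ip-ranges.json look roughly like (illustrative):
+ # {"ip_prefix": "52.95.110.0/24", "region": "us-east-1", "service": "EC2"}
+ # IPv6 entries carry "ipv6_prefix" instead of "ip_prefix".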
+ try:
+ resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
+ amazon_response = json.load(resp)[prefixes_label]
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
+ # on Python 3+, json.decoder.JSONDecodeError is raised for bad
+ # JSON. On 2.x it's a ValueError
+ raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
+ except HTTPError as e:
+ raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
+ except SSLValidationError as e:
+ raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
+ except URLError as e:
+ raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
+ except ConnectionError as e:
+ raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
+
+ if 'region' in kwargs:
+ region = kwargs['region']
+ amazon_response = (item for item in amazon_response if item['region'] == region)
+ if 'service' in kwargs:
+ service = kwargs['service'].upper()
+ amazon_response = (item for item in amazon_response if item['service'] == service)
+ iprange = [item[ip_prefix_label] for item in amazon_response]
+ return iprange
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
new file mode 100644
index 00000000..81e4cbb9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
@@ -0,0 +1,286 @@
+# (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
+# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
+# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_ssm
+author:
+ - Bill Wang (!UNKNOWN) <ozbillwang(at)gmail.com>
+ - Marat Bakeev (!UNKNOWN) <hawara(at)gmail.com>
+ - Michael De La Rue (!UNKNOWN) <siblemitcom.mddlr@spamgourmet.com>
+short_description: Get the value for an SSM parameter or all parameters under a path
+description:
+ - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
+ The first argument you pass the lookup can either be a parameter name or a hierarchy of
+ parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
+ 5 layers may be specified.
+ - If looking up an explicitly listed parameter by name which does not exist then the lookup
+ will generate an error. You can use the C(default) filter to give a default value in
+ this case but must set the I(on_missing) parameter to C(skip) or C(warn). You must
+ also set the second parameter of the C(default) filter to C(true) (see examples below).
+ - When looking up a path for parameters under it, a dictionary will be returned for each path.
+ If there is no parameter under that path then the lookup will generate an error.
+ - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm lookup
+ will generate an error. If you want to continue in this case then you will have to set up
+ two ansible tasks, one which sets a variable and ignores failures and one which uses the value
+ of that variable with a default. See the examples below.
+
+options:
+ decrypt:
+ description: A boolean to indicate whether to decrypt the parameter.
+ default: true
+ type: boolean
+ bypath:
+ description: A boolean to indicate whether the parameter is provided as a hierarchy.
+ default: false
+ type: boolean
+ recursive:
+ description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
+ default: false
+ type: boolean
+ shortnames:
+ description: Indicates whether to return the name only without path if using a parameter hierarchy.
+ default: false
+ type: boolean
+ on_missing:
+ description:
+ - Action to take if the SSM parameter is missing.
+ - C(error) will raise a fatal error when the SSM parameter is missing.
+ - C(skip) will silently ignore the missing SSM parameter.
+ - C(warn) will skip over the missing SSM parameter but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ version_added: 2.0.0
+ on_denied:
+ description:
+ - Action to take if access to the SSM parameter is denied.
+ - C(error) will raise a fatal error when access to the SSM parameter is denied.
+ - C(skip) will silently ignore the denied SSM parameter.
+ - C(warn) will skip over the denied SSM parameter but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ version_added: 2.0.0
+ endpoint:
+ description: Use a custom endpoint when connecting to SSM service.
+ type: string
+ version_added: 3.3.0
+extends_documentation_fragment:
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# lookup sample:
+- name: lookup ssm parameter store in the current region
+ debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
+
+- name: lookup ssm parameter store in specified region
+ debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
+
+- name: lookup ssm parameter store without decryption
+ debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
+
+- name: lookup ssm parameter store using a specified aws profile
+ debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
+
+- name: lookup ssm parameter store using explicit aws credentials
+ debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
+
+- name: lookup ssm parameter store with all options
+ debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
+
+- name: lookup ssm parameter and fail if missing
+ debug: msg="{{ lookup('aws_ssm', 'missing-parameter') }}"
+
+- name: lookup a key which doesn't exist, returning a default ('root')
+ debug: msg="{{ lookup('aws_ssm', 'AdminID', on_missing="skip") | default('root', true) }}"
+
+- name: lookup a key which doesn't exist failing to store it in a fact
+ set_fact:
+ temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
+ ignore_errors: true
+
+- name: show fact default to "access failed" if we don't have access
+ debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
+
+- name: return a dictionary of ssm parameters from a hierarchy path
+ debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
+
+- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
+ debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
+
+- name: Iterate over a parameter hierarchy (one iteration per parameter)
+ debug: msg='Key contains {{ item.key }} , with value {{ item.value }}'
+ loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}'
+
+- name: Iterate over multiple paths as dictionaries (one iteration per path)
+ debug: msg='Path contains {{ item }}'
+ loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}'
+
+- name: lookup ssm parameter warn if access is denied
+ debug: msg="{{ lookup('aws_ssm', 'missing-parameter', on_denied="warn" ) }}"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import missing_required_lib
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
+ aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
+ bypath=False, shortnames=False, recursive=False, decrypt=True, on_missing="error",
+ on_denied="error", endpoint=None):
+ '''
+ :arg terms: a list of lookups to run.
+ e.g. ['parameter_name', 'parameter_name_too' ]
+ :kwarg variables: ansible variables active at the time of the lookup
+ :kwarg aws_access_key: identity of the AWS key to use
+ :kwarg aws_secret_key: AWS secret key (matching identity)
+ :kwarg aws_security_token: AWS session token if using STS
+ :kwarg decrypt: Set to True to get decrypted parameters
+ :kwarg region: AWS region in which to do the lookup
+ :kwarg bypath: Set to True to do a lookup of variables under a path
+ :kwarg recursive: Set to True to recurse below the path (requires bypath=True)
+ :kwarg on_missing: Action to take if the SSM parameter is missing
+ :kwarg on_denied: Action to take if access to the SSM parameter is denied
+ :kwarg endpoint: Endpoint for SSM client
+ :returns: A list of parameter values or a list of dictionaries if bypath=True.
+ '''
+
+ if not HAS_BOTO3:
+ raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+ # validate arguments 'on_missing' and 'on_denied'
+ if on_missing is not None and (not isinstance(on_missing, string_types) or on_missing.lower() not in ['error', 'warn', 'skip']):
+ raise AnsibleError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % on_missing)
+ if on_denied is not None and (not isinstance(on_denied, string_types) or on_denied.lower() not in ['error', 'warn', 'skip']):
+ raise AnsibleError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % on_denied)
+
+ ret = []
+ ssm_dict = {}
+
+ self.params = variables
+
+ cli_region, cli_endpoint, cli_boto_params = get_aws_connection_info(self, boto3=True)
+
+ if region:
+ cli_region = region
+
+ if endpoint:
+ cli_endpoint = endpoint
+
+ # For backward compatibility
+ if aws_access_key:
+ cli_boto_params.update({'aws_access_key_id': aws_access_key})
+ if aws_secret_key:
+ cli_boto_params.update({'aws_secret_access_key': aws_secret_key})
+ if aws_security_token:
+ cli_boto_params.update({'aws_session_token': aws_security_token})
+ if boto_profile:
+ cli_boto_params.update({'profile_name': boto_profile})
+ if aws_profile:
+ cli_boto_params.update({'profile_name': aws_profile})
+
+ cli_boto_params.update(dict(
+ conn_type='client',
+ resource='ssm',
+ region=cli_region,
+ endpoint=cli_endpoint,
+ ))
+
+ client = boto3_conn(module=self, **cli_boto_params)
+
+ ssm_dict['WithDecryption'] = decrypt
+
+ # Lookup by path
+ if bypath:
+ ssm_dict['Recursive'] = recursive
+ for term in terms:
+ display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
+
+ paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower())
+ # Shorten parameter names. Yes, this will return
+ # duplicate names with different values.
+ if shortnames:
+ for x in paramlist:
+ x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
+
+ display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
+
+ ret.append(boto3_tag_list_to_ansible_dict(paramlist,
+ tag_name_key_name="Name",
+ tag_value_key_name="Value"))
+ # Lookup by parameter name - always returns a list with one or
+ # no entry.
+ else:
+ display.vvv("AWS_ssm name lookup term: %s" % terms)
+ for term in terms:
+ ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower()))
+ display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
+ return ret
+
+ def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied):
+ ssm_dict["Path"] = term
+ paginator = client.get_paginator('get_parameters_by_path')
+ try:
+ paramlist = paginator.paginate(**ssm_dict).build_full_result()['Parameters']
+ except is_boto3_error_code('AccessDeniedException'):
+ if on_denied == 'error':
+ raise AnsibleError("Failed to access SSM parameter path %s (AccessDenied)" % term)
+ elif on_denied == 'warn':
+ self._display.warning('Skipping, access denied for SSM parameter path %s' % term)
+ paramlist = [{}]
+ elif on_denied == 'skip':
+ paramlist = [{}]
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
+
+ if not len(paramlist):
+ if on_missing == "error":
+ raise AnsibleError("Failed to find SSM parameter path %s (ResourceNotFound)" % term)
+ elif on_missing == "warn":
+ self._display.warning('Skipping, did not find SSM parameter path %s' % term)
+
+ return paramlist
+
+ def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied):
+ ssm_dict["Name"] = term
+ try:
+ response = client.get_parameter(**ssm_dict)
+ return response['Parameter']['Value']
+ except is_boto3_error_code('ParameterNotFound'):
+ if on_missing == 'error':
+ raise AnsibleError("Failed to find SSM parameter %s (ResourceNotFound)" % term)
+ elif on_missing == 'warn':
+ self._display.warning('Skipping, did not find SSM parameter %s' % term)
+ except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except
+ if on_denied == 'error':
+ raise AnsibleError("Failed to access SSM parameter %s (AccessDenied)" % term)
+ elif on_denied == 'warn':
+ self._display.warning('Skipping, access denied for SSM parameter %s' % term)
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
+ return None
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py
new file mode 100644
index 00000000..d91cf3ab
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/_version.py
@@ -0,0 +1,344 @@
+# Vendored copy of distutils/version.py from CPython 3.9.5
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+
+try:
+ RE_FLAGS = re.VERBOSE | re.ASCII
+except AttributeError:
+ RE_FLAGS = re.VERBOSE
+
+
+class Version:
+ """Abstract base class for version numbering classes. Just provides
+ constructor (__init__) and reproducer (__repr__), because those
+ seem to be the same for all version numbering classes; and route
+ rich comparisons to _cmp.
+ """
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__(self):
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion(Version):
+ """Version numbering for anal retentives and software idealists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of two or three
+ dot-separated numeric components, with an optional "pre-release" tag
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
+ followed by a number. If the numeric components of two version
+ numbers are equal, then one with a pre-release tag will always
+ be deemed earlier (lesser) than one without.
+
+ The following are valid version numbers (shown in the order that
+ would be obtained by sorting according to the supplied cmp function):
+
+ 0.4 0.4.0 (these two are equivalent)
+ 0.4.1
+ 0.5a1
+ 0.5b3
+ 0.5
+ 0.9.6
+ 1.0
+ 1.0.4a3
+ 1.0.4b1
+ 1.0.4
+
+ The following are examples of invalid version numbers:
+
+ 1
+ 2.7.2.2
+ 1.3.a4
+ 1.3pl1
+ 1.3c4
+
+ The rationale for this version numbering system will be explained
+ in the distutils documentation.
+ """
+
+ version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$", RE_FLAGS)
+
+ def parse(self, vstring):
+ match = self.version_re.match(vstring)
+ if not match:
+ raise ValueError("invalid version number '%s'" % vstring)
+
+ (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6)
+
+ if patch:
+ self.version = tuple(map(int, [major, minor, patch]))
+ else:
+ self.version = tuple(map(int, [major, minor])) + (0,)
+
+ if prerelease:
+ self.prerelease = (prerelease[0], int(prerelease_num))
+ else:
+ self.prerelease = None
+
+ def __str__(self):
+ if self.version[2] == 0:
+ vstring = ".".join(map(str, self.version[0:2]))
+ else:
+ vstring = ".".join(map(str, self.version))
+
+ if self.prerelease:
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+ return vstring
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = StrictVersion(other)
+ elif not isinstance(other, StrictVersion):
+ return NotImplemented
+
+ if self.version != other.version:
+ # numeric versions don't match
+ # prerelease stuff doesn't matter
+ if self.version < other.version:
+ return -1
+ else:
+ return 1
+
+ # have to compare prerelease
+ # case 1: neither has prerelease; they're equal
+ # case 2: self has prerelease, other doesn't; other is greater
+ # case 3: self doesn't have prerelease, other does: self is greater
+ # case 4: both have prerelease: must compare them!
+
+ if not self.prerelease and not other.prerelease:
+ return 0
+ elif self.prerelease and not other.prerelease:
+ return -1
+ elif not self.prerelease and other.prerelease:
+ return 1
+ elif self.prerelease and other.prerelease:
+ if self.prerelease == other.prerelease:
+ return 0
+ elif self.prerelease < other.prerelease:
+ return -1
+ else:
+ return 1
+ else:
+ raise AssertionError("never get here")
+
+
+# end class StrictVersion
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+
+class LooseVersion(Version):
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def parse(self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x and x != "."]
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "LooseVersion ('%s')" % str(self)
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = LooseVersion(other)
+ elif not isinstance(other, LooseVersion):
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+
+# end class LooseVersion
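+
+# Comparison examples (illustrative): StrictVersion("1.5.2b2") < StrictVersion("1.5.2")
+# is True, since a pre-release sorts before the final release; SDK requirement
+# guards can be written along the lines of
+# LooseVersion(botocore.__version__) >= LooseVersion("1.21.0").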
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
new file mode 100644
index 00000000..81c65507
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
+# on behalf of Telstra Corporation Limited
+#
+# Common functionality to be used by the modules:
+# - acm_certificate
+# - acm_certificate_info
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Common AWS Certificate Manager (ACM) facts shared between modules
+"""
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .core import is_boto3_error_code
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
+class ACMServiceManager(object):
+ """Handles ACM Facts Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('acm')
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def delete_certificate_with_backoff(self, client, arn):
+ client.delete_certificate(CertificateArn=arn)
+
+ def delete_certificate(self, client, module, arn):
+ module.debug("Attempting to delete certificate %s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
+ module.debug("Successfully deleted certificate %s" % arn)
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def list_certificates_with_backoff(self, client, statuses=None):
+ paginator = client.get_paginator('list_certificates')
+ kwargs = dict()
+ if statuses:
+ kwargs['CertificateStatuses'] = statuses
+ return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def get_certificate_with_backoff(self, client, certificate_arn):
+ response = client.get_certificate(CertificateArn=certificate_arn)
+ # strip out response metadata
+ return {'Certificate': response['Certificate'],
+ 'CertificateChain': response['CertificateChain']}
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def describe_certificate_with_backoff(self, client, certificate_arn):
+ return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def list_certificate_tags_with_backoff(self, client, certificate_arn):
+ return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
+
+ # Returns a list of certificates
+ # if domain_name is specified, returns only certificates with that domain
+ # if an ARN is specified, returns only that certificate
+ # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
+ # only certificates which contain all those tags (key exists, value matches).
+ def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
+ try:
+ all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificates")
+ if domain_name:
+ certificates = [cert for cert in all_certificates
+ if cert['DomainName'] == domain_name]
+ else:
+ certificates = all_certificates
+
+ if arn:
+ # still return a list, not just one item
+ certificates = [c for c in certificates if c['CertificateArn'] == arn]
+
+ results = []
+ for certificate in certificates:
+ try:
+ cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
+
+ # in some states, ACM resources do not have a corresponding cert
+ if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
+ try:
+ cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
+ cert_data = camel_dict_to_snake_dict(cert_data)
+ try:
+ tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
+
+ cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ results.append(cert_data)
+
+ if only_tags:
+ for tag_key in only_tags:
+ try:
+ results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
+ except (TypeError, AttributeError) as e:
+ for c in results:
+ if 'tags' not in c:
+ module.debug("cert is %s" % str(c))
+ module.fail_json(msg="ACM tag filtering err", exception=e)
+
+ return results
+
+ # returns the domain name of a certificate (encoded in the public cert)
+ # for a given ARN
+ # A cert with that ARN must already exist
+ def get_domain_of_cert(self, client, module, arn):
+ if arn is None:
+            module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
+ try:
+ cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
+ return cert_data['DomainName']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
+ if certificate_chain:
+ if arn:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateChain=to_bytes(certificate_chain),
+ CertificateArn=arn)
+ else:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateChain=to_bytes(certificate_chain))
+ else:
+ if arn:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateArn=arn)
+ else:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key))
+ return ret['CertificateArn']
+
+ # Tags are a normal Ansible style dict
+ # {'Key':'Value'}
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def tag_certificate_with_backoff(self, client, arn, tags):
+ aws_tags = ansible_dict_to_boto3_tag_list(tags)
+ client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
+
+ def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
+
+ original_arn = arn
+
+ # upload cert
+ try:
+ arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't upload new certificate")
+
+ if original_arn and (arn != original_arn):
+            # I'm not sure whether the API guarantees that the ARN will not
+            # change, so fail just in case. If this assumption is wrong, the
+            # integration tests will catch it.
+ module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
+
+ # tag that cert
+ try:
+ self.tag_certificate_with_backoff(client, arn, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except (BotoCoreError, ClientError):
+ module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.")
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
+
+ return arn
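+
+# Example usage (an illustrative sketch; the domain name and status values
+# are placeholders):
+#
+#   acm = ACMServiceManager(module)
+#   certificates = acm.get_certificates(client, module,
+#                                       domain_name='example.com',
+#                                       statuses=['ISSUED'])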
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
new file mode 100644
index 00000000..ac8dfc9e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
@@ -0,0 +1,69 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+def parse_aws_arn(arn):
+ """
+ The following are the general formats for ARNs.
+ arn:partition:service:region:account-id:resource-id
+ arn:partition:service:region:account-id:resource-type/resource-id
+ arn:partition:service:region:account-id:resource-type:resource-id
+ The specific formats depend on the resource.
+ The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID.
+ """
+ m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn)
+ if m is None:
+ return None
+ result = dict()
+ result.update(dict(partition=m.group(1)))
+ result.update(dict(service=m.group(4)))
+ result.update(dict(region=m.group(5)))
+ result.update(dict(account_id=m.group(6)))
+ result.update(dict(resource=m.group(7)))
+
+ return result
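+
+# Example (illustrative): parsing a typical IAM role ARN yields its parts:
+#
+#   parse_aws_arn("arn:aws:iam::123456789012:role/example-role")
+#   -> {'partition': 'aws', 'service': 'iam', 'region': '',
+#       'account_id': '123456789012', 'resource': 'role/example-role'}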
+
+
+# An implementation of this originally lived in ec2.py; however, Outposts
+# aren't specific to the EC2 service
+def is_outpost_arn(arn):
+ """
+    Validates that the ARN is for an AWS Outpost.
+
+ API Specification Document:
+ https://docs.aws.amazon.com/outposts/latest/APIReference/API_Outpost.html
+ """
+ details = parse_aws_arn(arn)
+
+ if not details:
+ return False
+
+ service = details.get('service') or ""
+ if service.lower() != 'outposts':
+ return False
+ resource = details.get('resource') or ""
+ if not re.match('^outpost/op-[a-f0-9]{17}$', resource):
+ return False
+
+ return True
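+
+# Example (illustrative; the outpost id is a placeholder):
+#   is_outpost_arn("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0")
+#   -> True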
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
new file mode 100644
index 00000000..c2721451
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Batch modules.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+
+def cc(key):
+ """
+ Changes python key into Camel case equivalent. For example, 'compute_environment_name' becomes
+ 'computeEnvironmentName'.
+
+ :param key:
+ :return:
+ """
+ components = key.split('_')
+ return components[0] + "".join([token.capitalize() for token in components[1:]])
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+ :param module:
+ :param module_params:
+ :return:
+ """
+ api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+ return snake_dict_to_camel_dict(api_params)
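+
+# Example (illustrative): cc('compute_environment_name') returns
+# 'computeEnvironmentName'. Given module.params of
+# {'state': 'present', 'compute_environment_name': 'ce1'},
+# set_api_params(module, ('compute_environment_name',)) returns
+# {'computeEnvironmentName': 'ce1'} - 'state' is dropped because it is not
+# in module_params.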
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
new file mode 100644
index 00000000..0f368e65
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
@@ -0,0 +1,357 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+A set of helper functions designed to help with initializing boto3/botocore
+connections.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import traceback
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six import binary_type
+from ansible.module_utils.six import text_type
+
+from .retries import AWSRetry
+
+
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ """
+ Builds a boto3 resource/client connection cleanly wrapping the most common failures.
+ Handles:
+ ValueError,
+ botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError,
+ botocore.exceptions.NoRegionError
+ """
+ try:
+ return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
+ except ValueError as e:
+ module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
+ module.fail_json(msg=to_native(e))
+ except botocore.exceptions.NoRegionError:
+ module.fail_json(msg="The %s module requires a region and none was found in configuration, "
+ "environment variables or module parameters" % module._name)
+
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ """
+ Builds a boto3 resource/client connection cleanly wrapping the most common failures.
+ No exceptions are caught/handled.
+ """
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+        raise ValueError('There is an issue in the calling code. You '
+                         'must specify either "both", "resource", or "client" '
+                         'as the conn_type parameter in the boto3_conn '
+                         'function call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ enable_placebo(session)
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
+# Inventory plugins don't have access to the same 'module', they need to throw
+# an exception rather than calling module.fail_json
+boto3_inventory_conn = _boto3_conn
+
+
+def boto_exception(err):
+ """
+ Extracts the error message from a boto exception.
+
+ :param err: Exception from boto
+ :return: Error message
+ """
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def get_aws_region(module, boto3=None):
+ region = module.params.get('region')
+
+ if region:
+ return region
+
+ if not HAS_BOTO3:
+ module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+
+    # No additional API call is needed here; boto3 itself will default to
+    # 'us-east-1' if the lookup below evaluates to None.
+ try:
+ # Botocore doesn't like empty strings, make sure we default to None in the case of an empty
+ # string.
+ profile_name = module.params.get('profile') or None
+ return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ except botocore.exceptions.ProfileNotFound:
+ return None
+
+
+def get_aws_connection_info(module, boto3=None):
+
+ # Check module args for credentials, then check environment vars
+ # access_key
+
+ endpoint_url = module.params.get('endpoint_url')
+ access_key = module.params.get('access_key')
+ secret_key = module.params.get('secret_key')
+ session_token = module.params.get('session_token')
+ region = get_aws_region(module)
+ profile_name = module.params.get('profile')
+ validate_certs = module.params.get('validate_certs')
+ ca_bundle = module.params.get('aws_ca_bundle')
+ config = module.params.get('aws_config')
+
+ if profile_name and (access_key or secret_key or session_token):
+ module.fail_json(msg="Passing both a profile and access tokens is not supported.")
+
+ # Botocore doesn't like empty strings, make sure we default to None in the case of an empty
+ # string.
+ if not access_key:
+ access_key = None
+ if not secret_key:
+ secret_key = None
+ if not session_token:
+ session_token = None
+
+ if profile_name:
+ boto_params = dict(
+ aws_access_key_id=None,
+ aws_secret_access_key=None,
+ aws_session_token=None,
+ profile_name=profile_name,
+ )
+ else:
+ boto_params = dict(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=session_token,
+ )
+
+ if validate_certs and ca_bundle:
+ boto_params['verify'] = ca_bundle
+ else:
+ boto_params['verify'] = validate_certs
+
+ if config is not None:
+ boto_params['aws_config'] = botocore.config.Config(**config)
+
+ for param, value in boto_params.items():
+ if isinstance(value, binary_type):
+ boto_params[param] = text_type(value, 'utf-8', 'strict')
+
+ return region, endpoint_url, boto_params
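+
+# Typical flow (a sketch mirroring how callers combine these helpers; the
+# service name is a placeholder):
+#
+#   region, endpoint_url, boto_params = get_aws_connection_info(module, boto3=True)
+#   client = boto3_conn(module, conn_type='client', resource='ec2',
+#                       region=region, endpoint=endpoint_url, **boto_params)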
+
+
+def _paginated_query(client, paginator_name, **params):
+ paginator = client.get_paginator(paginator_name)
+ result = paginator.paginate(**params).build_full_result()
+ return result
+
+
+def paginated_query_with_retries(client, paginator_name, retry_decorator=None, **params):
+ """
+ Performs a boto3 paginated query.
+    By default uses AWSRetry.jittered_backoff(retries=10) to retry queries
+ with temporary failures.
+
+ Examples:
+ tags = paginated_query_with_retries(client, "describe_tags", Filters=[])
+
+ decorator = AWSRetry.backoff(tries=5, delay=5, backoff=2.0,
+ catch_extra_error_codes=['RequestInProgressException'])
+ certificates = paginated_query_with_retries(client, "list_certificates", decorator)
+ """
+ if retry_decorator is None:
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ result = retry_decorator(_paginated_query)(client, paginator_name, **params)
+ return result
+
+
+def gather_sdk_versions():
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+ Returns {'boto3_version': str, 'botocore_version': str}
+ Returns {} if either module is not installed
+ """
+ if not HAS_BOTO3:
+ return {}
+ import boto3
+ import botocore
+ return dict(boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__)
+
+
+def is_boto3_error_code(code, e=None):
+ """Check if the botocore exception is raised by a specific error code.
+
+ Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+ Example:
+ try:
+ ec2.describe_instances(InstanceIds=['potato'])
+ except is_boto3_error_code('InvalidInstanceID.Malformed'):
+ # handle the error for that code case
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if not isinstance(code, list):
+ code = [code]
+ if isinstance(e, ClientError) and e.response['Error']['Code'] in code:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def is_boto3_error_message(msg, e=None):
+ """Check if the botocore exception contains a specific error message.
+
+ Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+ Example:
+ try:
+ ec2.describe_vpc_classic_link(VpcIds=[vpc_id])
+ except is_boto3_error_message('The functionality you requested is not available in this region.'):
+ # handle the error for that error message
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
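+    """
+    Returns the names of the top-level parameters accepted by a boto3 client
+    method, resolved from botocore's service model; with required=True only
+    the required members of the operation's input shape are returned.
+    """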
+ op = client.meta.method_to_api_mapping.get(method_name)
+ input_shape = client._service_model.operation_model(op).input_shape
+ if not input_shape:
+ parameters = []
+ elif required:
+ parameters = list(input_shape.required_members)
+ else:
+ parameters = list(input_shape.members.keys())
+ return parameters
+
+
+# Used by normalize_boto3_result
+def _boto3_handler(obj):
+ if hasattr(obj, 'isoformat'):
+ return obj.isoformat()
+ else:
+ return obj
+
+
+def normalize_boto3_result(result):
+ """
+ Because Boto3 returns datetime objects where it knows things are supposed to
+ be dates we need to mass-convert them over to strings which Ansible/Jinja
+ handle better. This also makes it easier to compare complex objects which
+ include a mix of dates in string format (from parameters) and dates as
+ datetime objects. Boto3 is happy to be passed ISO8601 format strings.
+ """
+ return json.loads(json.dumps(result, default=_boto3_handler))
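+
+# Example (illustrative): a datetime in a boto3 response such as
+#   {'Instances': [{'LaunchTime': datetime(2019, 2, 27, 9, 10)}]}
+# becomes
+#   {'Instances': [{'LaunchTime': '2019-02-27T09:10:00'}]}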
+
+
+def enable_placebo(session):
+ """
+ Helper to record or replay offline modules for testing purpose.
+ """
+ if "_ANSIBLE_PLACEBO_RECORD" in os.environ:
+ import placebo
+ existing_entries = os.listdir(os.environ["_ANSIBLE_PLACEBO_RECORD"])
+ idx = len(existing_entries)
+ data_path = f"{os.environ['_ANSIBLE_PLACEBO_RECORD']}/{idx}"
+ os.mkdir(data_path)
+ pill = placebo.attach(session, data_path=data_path)
+ pill.record()
+ if "_ANSIBLE_PLACEBO_REPLAY" in os.environ:
+ import shutil
+ import placebo
+ existing_entries = sorted([int(i) for i in os.listdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])])
+ idx = str(existing_entries[0])
+ data_path = os.environ['_ANSIBLE_PLACEBO_REPLAY'] + "/" + idx
+ try:
+ shutil.rmtree("_tmp")
+ except FileNotFoundError:
+ pass
+ shutil.move(data_path, "_tmp")
+ if len(existing_entries) == 1:
+ os.rmdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])
+ pill = placebo.attach(session, data_path="_tmp")
+ pill.playback()
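+
+# Usage note (testing only): pointing _ANSIBLE_PLACEBO_RECORD at an empty
+# directory records each session's API traffic into numbered subdirectories,
+# while _ANSIBLE_PLACEBO_REPLAY consumes those recordings in order, letting
+# modules be exercised without contacting AWS.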
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..e690c0a8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2021 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+import functools
+import random
+import ansible.module_utils.common.warnings as ansible_warnings
+
+
+class BackoffIterator:
+ """iterate sleep value based on the exponential or jitter back-off algorithm.
+ Args:
+ delay (int or float): initial delay.
+ backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry.
+ max_delay (int or None): maximum amount of time to wait between retries.
+        jitter (bool): if set to true, add jitter to the generated value.
+ """
+
+ def __init__(self, delay, backoff, max_delay=None, jitter=False):
+ self.delay = delay
+ self.backoff = backoff
+ self.max_delay = max_delay
+ self.jitter = jitter
+
+ def __iter__(self):
+ self.current_delay = self.delay
+ return self
+
+ def __next__(self):
+ return_value = self.current_delay if self.max_delay is None else min(self.current_delay, self.max_delay)
+ if self.jitter:
+ return_value = random.uniform(0.0, return_value)
+ self.current_delay *= self.backoff
+ return return_value
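+
+# Example (illustrative): BackoffIterator(delay=3, backoff=2, max_delay=20)
+# yields 3, 6, 12, 20, 20, ... indefinitely; with jitter=True each value is
+# instead drawn uniformly from [0, capped delay].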
+
+
+def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class):
+ counter = 0
+ for sleep_time in sleep_time_generator:
+ try:
+ return func()
+ except Exception as exc: # pylint: disable=broad-except
+ counter += 1
+ if counter == retries:
+ raise
+ if base_class and not isinstance(exc, base_class):
+ raise
+ status_code = status_code_from_except_f(exc)
+ if found_f(status_code, catch_extra_error_codes):
+ time.sleep(sleep_time)
+ else:
+ raise
+
+
+class CloudRetry:
+ """
+ The base class to be used by other cloud providers to provide a backoff/retry decorator based on status codes.
+ """
+
+ base_class = type(None)
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """
+ Returns the Error 'code' from an exception.
+ Args:
+ error: The Exception from which the error code is to be extracted.
+ error will be an instance of class.base_class.
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ def _is_iterable():
+ try:
+ iter(catch_extra_error_codes)
+ except TypeError:
+ # not iterable
+ return False
+ else:
+ # iterable
+ return True
+ return _is_iterable() and response_code in catch_extra_error_codes
+
+ @classmethod
+ def base_decorator(cls, retries, found, status_code_from_exception, catch_extra_error_codes, sleep_time_generator):
+ def retry_decorator(func):
+ @functools.wraps(func)
+ def _retry_wrapper(*args, **kwargs):
+ partial_func = functools.partial(func, *args, **kwargs)
+ return _retry_func(
+ func=partial_func,
+ sleep_time_generator=sleep_time_generator,
+ retries=retries,
+ catch_extra_error_codes=catch_extra_error_codes,
+ found_f=found,
+ status_code_from_except_f=status_code_from_exception,
+ base_class=cls.base_class,
+ )
+ return _retry_wrapper
+ return retry_decorator
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """Wrap a callable with retry behavior.
+ Args:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
+ default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+            catch_extra_error_codes: Additional error codes to catch, in addition to those which may be defined by a subclass of CloudRetry
+ default=None
+ Returns:
+            Callable: A decorator that retries the wrapped function using an exponential backoff.
+ """
+ sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay)
+ return cls.base_decorator(
+ retries=retries,
+ found=cls.found,
+ status_code_from_exception=cls.status_code_from_exception,
+ catch_extra_error_codes=catch_extra_error_codes,
+ sleep_time_generator=sleep_time_generator,
+ )
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, backoff=2.0, max_delay=60, catch_extra_error_codes=None):
+ """Wrap a callable with retry behavior.
+ Args:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
+ default=2.0
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+            catch_extra_error_codes: Additional error codes to catch, in addition to those which may be defined by a subclass of CloudRetry
+ default=None
+ Returns:
+            Callable: A decorator that retries the wrapped function using a jittered backoff strategy.
+ """
+ sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay, jitter=True)
+ return cls.base_decorator(
+ retries=retries,
+ found=cls.found,
+ status_code_from_exception=cls.status_code_from_exception,
+ catch_extra_error_codes=catch_extra_error_codes,
+ sleep_time_generator=sleep_time_generator,
+ )
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Wrap a callable with retry behavior.
+        Developers should use CloudRetry.exponential_backoff instead; this
+        method has been deprecated and will be removed in release 6.0.0.
+ Args:
+            tries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
+ default=1.1
+            catch_extra_error_codes: Additional error codes to catch, in addition to those which may be defined by a subclass of CloudRetry
+ default=None
+ Returns:
+            Callable: A decorator that retries the wrapped function using an exponential backoff.
+ """
+ # This won't emit a warning (we don't have the context available to us), but will trigger
+ # sanity failures as we prepare for 6.0.0
+ ansible_warnings.deprecate(
+ 'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead',
+ version='6.0.0', collection_name='amazon.aws')
+
+ return cls.exponential_backoff(
+ retries=tries,
+ delay=delay,
+ backoff=backoff,
+ max_delay=None,
+ catch_extra_error_codes=catch_extra_error_codes,
+ )
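+
+
+# A minimal subclass sketch (an assumption for illustration - the collection's
+# real implementation is AWSRetry in retries.py):
+#
+#   class ExampleRetry(CloudRetry):
+#       base_class = botocore.exceptions.ClientError
+#
+#       @staticmethod
+#       def status_code_from_exception(error):
+#           return error.response['Error']['Code']
+#
+#   @ExampleRetry.jittered_backoff(retries=5, catch_extra_error_codes=['Throttling'])
+#   def describe_something(client):
+#       return client.describe_something()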
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
new file mode 100644
index 00000000..c628bff1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Willem van Ketwich
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Willem van Ketwich <willem@vanketwich.com.au>
+#
+# Common functionality to be used by the modules:
+# - cloudfront_distribution
+# - cloudfront_invalidation
+# - cloudfront_origin_access_identity
+"""
+Common cloudfront facts shared between modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
+class CloudFrontFactsServiceManager(object):
+ """Handles CloudFront Facts Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+
+ def get_distribution(self, distribution_id):
+ try:
+ return self.client.get_distribution(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution")
+
+ def get_distribution_config(self, distribution_id):
+ try:
+ return self.client.get_distribution_config(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution configuration")
+
+ def get_origin_access_identity(self, origin_access_identity_id):
+ try:
+ return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity")
+
+ def get_origin_access_identity_config(self, origin_access_identity_id):
+ try:
+ return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
+
+ def get_invalidation(self, distribution_id, invalidation_id):
+ try:
+ return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing invalidation")
+
+ def get_streaming_distribution(self, distribution_id):
+ try:
+ return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ def get_streaming_distribution_config(self, distribution_id):
+ try:
+ return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ def list_origin_access_identities(self):
+ try:
+ paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
+ result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
+ return result.get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
+
+ def list_distributions(self, keyed=True):
+ try:
+ paginator = self.client.get_paginator('list_distributions')
+ result = paginator.paginate().build_full_result().get('DistributionList', {})
+ distribution_list = result.get('Items', [])
+ if not keyed:
+ return distribution_list
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions")
+
+ def list_distributions_by_web_acl_id(self, web_acl_id):
+ try:
+            result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id, aws_retry=True)
+ distribution_list = result.get('DistributionList', {}).get('Items', [])
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
+
+ def list_invalidations(self, distribution_id):
+ try:
+ paginator = self.client.get_paginator('list_invalidations')
+ result = paginator.paginate(DistributionId=distribution_id).build_full_result()
+ return result.get('InvalidationList', {}).get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing invalidations")
+
+ def list_streaming_distributions(self, keyed=True):
+ try:
+ paginator = self.client.get_paginator('list_streaming_distributions')
+ result = paginator.paginate().build_full_result()
+ streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
+ if not keyed:
+ return streaming_distribution_list
+ return self.keyed_list_helper(streaming_distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing streaming distributions")
+
+ def summary(self):
+ summary_dict = {}
+ summary_dict.update(self.summary_get_distribution_list(False))
+ summary_dict.update(self.summary_get_distribution_list(True))
+ summary_dict.update(self.summary_get_origin_access_identity_list())
+ return summary_dict
+
+ def summary_get_origin_access_identity_list(self):
+ try:
+ origin_access_identity_list = {'origin_access_identities': []}
+ origin_access_identities = self.list_origin_access_identities()
+ for origin_access_identity in origin_access_identities:
+ oai_id = origin_access_identity['Id']
+ oai_full_response = self.get_origin_access_identity(oai_id)
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
+ return origin_access_identity_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
+
+ def summary_get_distribution_list(self, streaming=False):
+ try:
+ list_name = 'streaming_distributions' if streaming else 'distributions'
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
+ distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ for dist in distributions:
+ temp_distribution = {}
+ for key_name in key_list:
+ temp_distribution[key_name] = dist[key_name]
+ temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', []))
+ temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ if not streaming:
+ temp_distribution['WebACLId'] = dist['WebACLId']
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ if invalidation_ids:
+ temp_distribution['Invalidations'] = invalidation_ids
+ resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True)
+ temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ distribution_list[list_name].append(temp_distribution)
+ return distribution_list
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of distributions")
+
+ def get_etag_from_distribution_id(self, distribution_id, streaming):
+ distribution = {}
+ if not streaming:
+ distribution = self.get_distribution(distribution_id)
+ else:
+ distribution = self.get_streaming_distribution(distribution_id)
+ return distribution['ETag']
+
+ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
+ try:
+ invalidation_ids = []
+ invalidations = self.list_invalidations(distribution_id)
+ for invalidation in invalidations:
+ invalidation_ids.append(invalidation['Id'])
+ return invalidation_ids
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
+
+ def get_distribution_id_from_domain_name(self, domain_name):
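+ """Return the Id of the (streaming) distribution with an alias matching domain_name, or '' if none matches."""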
+ try:
+ distribution_id = ""
+ distributions = self.list_distributions(False)
+ distributions += self.list_streaming_distributions(False)
+ for dist in distributions:
+ if 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ if str(alias).lower() == domain_name.lower():
+ distribution_id = dist['Id']
+ break
+ return distribution_id
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
+
+ def get_aliases_from_distribution_id(self, distribution_id):
+ try:
+ distribution = self.get_distribution(distribution_id)
+ return distribution['DistributionConfig']['Aliases'].get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
+
+ def keyed_list_helper(self, list_to_key):
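+ """Return a dict of distributions keyed by Id and by each alias, all mapping to the same item."""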
+ keyed_list = dict()
+ for item in list_to_key:
+ distribution_id = item['Id']
+ if 'Items' in item['Aliases']:
+ aliases = item['Aliases']['Items']
+ for alias in aliases:
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
+ return keyed_list
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py
new file mode 100644
index 00000000..bfd7fe10
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py
@@ -0,0 +1,77 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+ module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+ mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' class provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes additional
+methods for connecting to AWS using the standard module arguments:
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make AWSRetry easier to use, it can be wrapped around any call from a
+module-created client. To add retries to a client, pass a retry decorator
+when creating it:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can then use the decorator passed at creation
+time by setting the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
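+
+A combined usage sketch (the argument spec and error code below are illustrative):
+
+ module = AnsibleAWSModule(argument_spec=dict(instance_id=dict(type='str')))
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ try:
+ ec2.describe_instances(InstanceIds=[module.params['instance_id']], aws_retry=True)
+ except is_boto3_error_code('InvalidInstanceID.NotFound'):
+ module.fail_json(msg='instance not found')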
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
+from .arn import parse_aws_arn # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
+from .botocore import HAS_BOTO3 # pylint: disable=unused-import
+from .botocore import is_boto3_error_code # pylint: disable=unused-import
+from .botocore import is_boto3_error_message # pylint: disable=unused-import
+from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import
+from .botocore import normalize_boto3_result # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
+from .modules import AnsibleAWSModule # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation
+from .transformation import scrub_none_parameters # pylint: disable=unused-import
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message')
+
+
+class AnsibleAWSError(Exception):
+ pass
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
new file mode 100644
index 00000000..abcbcfd2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Direct Connect modules.
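+
+All helpers below raise DirectConnectError on failure. A minimal sketch of
+caller-side handling (assuming an AnsibleAWSModule instance named 'module'):
+
+ try:
+ delete_connection(client, connection_id)
+ except DirectConnectError as e:
+ module.fail_json(msg=e.msg, exception=e.last_traceback)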
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+
+
+class DirectConnectError(Exception):
+ def __init__(self, msg, last_traceback=None, exception=None):
+ self.msg = msg
+ self.last_traceback = last_traceback
+ self.exception = exception
+
+
+def delete_connection(client, connection_id):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def associate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
+ " with link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def disassociate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
+ " from link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def delete_virtual_interface(client, virtual_interface):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
+ last_traceback=traceback.format_exc(),
+ exception=e)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
new file mode 100644
index 00000000..817c1229
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -0,0 +1,310 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+This module adds helper functions for various EC2 specific services.
+
+It also includes a large number of imports for functions which historically
+lived here. Most of these functions were not specific to EC2, they ended
+up in this module because "that's where the AWS code was" (originally).
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+# Used to live here, moved into ansible.module_utils.common.dict_transformations
+from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
+from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
+from .botocore import HAS_BOTO3 # pylint: disable=unused-import
+from .botocore import boto3_conn # pylint: disable=unused-import
+from .botocore import boto3_inventory_conn # pylint: disable=unused-import
+from .botocore import boto_exception # pylint: disable=unused-import
+from .botocore import get_aws_region # pylint: disable=unused-import
+from .botocore import get_aws_connection_info # pylint: disable=unused-import
+
+from .botocore import paginated_query_with_retries
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.core
+from .core import AnsibleAWSError # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
+# The names have been changed in .modules to better reflect their applicability.
+from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import
+from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging
+from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import
+from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import
+from .tagging import compare_aws_tags # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation
+from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import
+from .transformation import map_complex_type # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy
+from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
+from .policy import compare_policies # pylint: disable=unused-import
+from .policy import sort_json_policy_dict # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries
+from .retries import AWSRetry # pylint: disable=unused-import
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by HAS_BOTO3
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
+ a try block
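+
+ A minimal usage sketch (the group names and VPC ID are illustrative):
+
+ try:
+ sg_ids = get_ec2_security_group_ids_from_names(['web', 'db'], ec2_client, vpc_id='vpc-0123456789abcdef0')
+ except ValueError as e:
+ module.fail_json(msg=str(e))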
+ """
+
+ def get_sg_name(sg, boto3=None):
+ return str(sg['GroupName'])
+
+ def get_sg_id(sg, boto3=None):
+ return str(sg['GroupId'])
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+
+ unmatched = set(sec_group_list).difference(get_sg_name(all_sg, boto3) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list]
+
+ return sec_group_id_list
+
+
+def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None):
+ """
+ Sets Tags on an EC2 resource.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param tags_to_set: A dictionary of key/value pairs to set
+ :param retry_codes: additional boto3 error codes to trigger retries
+ """
+
+ if not tags_to_set:
+ return False
+ if module.check_mode:
+ return True
+
+ if not retry_codes:
+ retry_codes = []
+
+ try:
+ tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
+ client.create_tags
+ )(
+ Resources=[resource_id], Tags=tags_to_add
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id))
+ return True
+
+
+def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None):
+ """
+ Removes Tags from an EC2 resource.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param tags_to_unset: a list of tag keys to remove
+ :param retry_codes: additional boto3 error codes to trigger retries
+ """
+
+ if not tags_to_unset:
+ return False
+ if module.check_mode:
+ return True
+
+ if not retry_codes:
+ retry_codes = []
+
+ tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
+
+ try:
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
+ client.delete_tags
+ )(
+ Resources=[resource_id], Tags=tags_to_remove
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id))
+ return True
+
+
+def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_codes=None):
+ """
+ Performs a paginated search of EC2 resource tags.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param resource_type: the type of the resource
+ :param retry_codes: additional boto3 error codes to trigger retries
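+
+ A minimal usage sketch (the instance ID is illustrative):
+
+ tags = describe_ec2_tags(client, module, 'i-0123456789abcdef0', resource_type='instance')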
+ """
+ filters = {'resource-id': resource_id}
+ if resource_type:
+ filters['resource-type'] = resource_type
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ if not retry_codes:
+ retry_codes = []
+
+ try:
+ retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)
+ results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator,
+ Filters=filters)
+ return boto3_tag_list_to_ansible_dict(results.get('Tags', None))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id))
+
+
+def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None):
+ """
+ Updates the tags on an EC2 resource.
+
+ To remove all tags the tags parameter must be explicitly set to an empty dictionary.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param resource_type: the type of the resource
+ :param tags: the Tags to apply to the resource
+ :param purge_tags: whether tags missing from the tag list should be removed
+ :param retry_codes: additional boto3 error codes to trigger retries
+ :return: changed: returns True if the tags are changed
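+
+ A minimal usage sketch (the VPC ID and tags are illustrative):
+
+ changed = ensure_ec2_tags(client, module, 'vpc-0123456789abcdef0', resource_type='vpc',
+ tags={'Environment': 'dev'}, purge_tags=False)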
+ """
+
+ if tags is None:
+ return False
+
+ if not retry_codes:
+ retry_codes = []
+
+ changed = False
+ current_tags = describe_ec2_tags(client, module, resource_id, resource_type, retry_codes)
+
+ tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if purge_tags and not tags:
+ tags_to_unset = current_tags
+
+ changed |= remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes)
+ changed |= add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes)
+
+ return changed
+
+
+def normalize_ec2_vpc_dhcp_config(option_config):
+ """
+ The boto2 module returned a config dict, but boto3 returns a list of dicts.
+ Make the data we return look like the old way, so we don't break users.
+ This is also much more user-friendly.
+ boto3:
+ 'DhcpConfigurations': [
+ {'Key': 'domain-name', 'Values': [{'Value': 'us-west-2.compute.internal'}]},
+ {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]},
+ {'Key': 'netbios-name-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]},
+ {'Key': 'netbios-node-type', 'Values': [1]},
+ {'Key': 'ntp-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]}
+ ],
+ The module historically returned:
+ "new_options": {
+ "domain-name": "ec2.internal",
+ "domain-name-servers": ["AmazonProvidedDNS"],
+ "netbios-name-servers": ["10.0.0.1", "10.0.1.1"],
+ "netbios-node-type": "1",
+ "ntp-servers": ["10.0.0.2", "10.0.1.2"]
+ },
+ """
+ config_data = {}
+
+ if len(option_config) == 0:
+ # If there is no provided config, return the empty dictionary
+ return config_data
+
+ for config_item in option_config:
+ # Handle single value keys
+ if config_item['Key'] == 'netbios-node-type':
+ if isinstance(config_item['Values'], integer_types):
+ config_data['netbios-node-type'] = str(config_item['Values'])
+ elif isinstance(config_item['Values'], list):
+ config_data['netbios-node-type'] = str(config_item['Values'][0]['Value'])
+ # Handle actual lists of values
+ for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']:
+ if config_item['Key'] == option:
+ config_data[option] = [val['Value'] for val in config_item['Values']]
+
+ return config_data
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
new file mode 100644
index 00000000..218052d2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .core import is_boto3_error_code
+from .ec2 import AWSRetry
+
+
+def get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
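+
+ A minimal usage sketch (the load balancer name is illustrative):
+
+ elb = get_elb(connection, module, 'my-test-alb')
+ if elb is not None:
+ module.exit_json(changed=False, arn=elb['LoadBalancerArn'])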
+ """
+ try:
+ return _get_elb(connection, module, elb_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name using AWSRetry. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
+ """
+
+ try:
+ load_balancer_paginator = connection.get_paginator('describe_load_balancers')
+ return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return None
+
+
+def get_elb_listener(connection, module, elb_arn, listener_port):
+ """
+ Get an ELB listener based on the port provided. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_arn: ARN of the ELB to look at
+ :param listener_port: Port of the listener to look for
+ :return: boto3 ELB listener dict or None if not found
+ """
+
+ try:
+ listener_paginator = connection.get_paginator('describe_listeners')
+ listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ matched_listener = None
+
+ for listener in listeners:
+ if listener['Port'] == listener_port:
+ matched_listener = listener
+ break
+
+ return matched_listener
+
+
+def get_elb_listener_rules(connection, module, listener_arn):
+ """
+ Get rules for a particular ELB listener using the listener ARN.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param listener_arn: ARN of the ELB listener
+ :return: boto3 ELB rules list
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+ """
+ Get ARN of a target group using the target group's name
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param tg_name: Name of the target group
+ :return: target group ARN string
+ """
+
+ try:
+ response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+ return tg_arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
new file mode 100644
index 00000000..533fd75e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -0,0 +1,1114 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from copy import deepcopy
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import get_ec2_security_group_ids_from_names
+from .elb_utils import convert_tg_name_to_arn
+from .elb_utils import get_elb
+from .elb_utils import get_elb_listener
+from .waiters import get_waiter
+
+
+def _simple_forward_config_arn(config, parent_arn):
+ config = deepcopy(config)
+
+ stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False})
+ # Stickiness options set, non default value
+ if stickiness != {'Enabled': False}:
+ return False
+
+ target_groups = config.pop('TargetGroups', [])
+
+ # non-default config left over, probably invalid
+ if config:
+ return False
+ # Multiple TGS, not simple
+ if len(target_groups) > 1:
+ return False
+
+ if not target_groups:
+ # with no TGs defined, but an ARN set, this is one of the minimum possible configs
+ return parent_arn or False
+
+ target_group = target_groups[0]
+ # We don't care about the weight with a single TG
+ target_group.pop('Weight', None)
+
+ target_group_arn = target_group.pop('TargetGroupArn', None)
+
+ # non-default config left over
+ if target_group:
+ return False
+
+ # We didn't find an ARN
+ if not (target_group_arn or parent_arn):
+ return False
+
+ # Only one
+ if not parent_arn:
+ return target_group_arn
+ if not target_group_arn:
+ return parent_arn
+
+ if parent_arn != target_group_arn:
+ return False
+
+ return target_group_arn
+
+
+# ForwardConfig may be optional if we've got a single TargetGroupArn entry
+def _prune_ForwardConfig(action):
+ """
+ Drops a redundant ForwardConfig where TargetGroupArn has already been set.
+ (So we can perform comparisons.)
+ """
+ if action.get('Type', "") != 'forward':
+ return action
+ if "ForwardConfig" not in action:
+ return action
+
+ parent_arn = action.get('TargetGroupArn', None)
+ arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn)
+ if not arn:
+ return action
+
+ # Remove the redundant ForwardConfig
+ newAction = action.copy()
+ del newAction["ForwardConfig"]
+ newAction["TargetGroupArn"] = arn
+ return newAction
+
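+# For example (the ARN value here is illustrative), _prune_ForwardConfig reduces
+# {'Type': 'forward',
+# 'ForwardConfig': {'TargetGroups': [{'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/abc123', 'Weight': 1}]}}
+# to the equivalent simple action
+# {'Type': 'forward', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/abc123'}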
+
+# the AWS api won't return the client secret, so we'll have to remove it
+# or the module will always see the new and current actions as different
+# and try to apply the same config
+def _prune_secret(action):
+ if action['Type'] != 'authenticate-oidc':
+ return action
+
+ action['AuthenticateOidcConfig'].pop('ClientSecret', None)
+ if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False):
+ action['AuthenticateOidcConfig'].pop('UseExistingClientSecret')
+
+ return action
+
+
+def _sort_actions(actions):
+ return sorted(actions, key=lambda x: x.get('Order', 0))
+
+
+class ElasticLoadBalancerV2(object):
+
+ def __init__(self, connection, module):
+
+ self.connection = connection
+ self.module = module
+ self.changed = False
+ self.new_load_balancer = False
+ self.scheme = module.params.get("scheme")
+ self.name = module.params.get("name")
+ self.subnet_mappings = module.params.get("subnet_mappings")
+ self.subnets = module.params.get("subnets")
+ self.deletion_protection = module.params.get("deletion_protection")
+ self.elb_ip_addr_type = module.params.get("ip_address_type")
+ self.wait = module.params.get("wait")
+
+ if module.params.get("tags") is not None:
+ self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
+ else:
+ self.tags = None
+
+ self.purge_tags = module.params.get("purge_tags")
+
+ self.elb = get_elb(connection, module, self.name)
+ if self.elb is not None:
+ self.elb_attributes = self.get_elb_attributes()
+ self.elb_ip_addr_type = self.get_elb_ip_address_type()
+ self.elb['tags'] = self.get_elb_tags()
+ else:
+ self.elb_attributes = None
+
+ def wait_for_ip_type(self, elb_arn, ip_type):
+ """
+ Wait for load balancer to reach the expected IP address type
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ waiter_names = {
+ 'ipv4': 'load_balancer_ip_address_type_ipv4',
+ 'dualstack': 'load_balancer_ip_address_type_dualstack',
+ }
+ if ip_type not in waiter_names:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, waiter_names.get(ip_type))
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def wait_for_status(self, elb_arn):
+ """
+ Wait for load balancer to reach 'active' status
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, 'load_balancer_available')
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def wait_for_deletion(self, elb_arn):
+ """
+ Wait for load balancer to be deleted
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, 'load_balancers_deleted')
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def get_elb_attributes(self):
+ """
+ Get load balancer attributes
+
+ :return:
+ """
+
+ try:
+ attr_list = AWSRetry.jittered_backoff()(
+ self.connection.describe_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
+
+ elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
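+ # e.g. 'idle_timeout.timeout_seconds' becomes 'idle_timeout_timeout_seconds'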
+ return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
+
+ def get_elb_ip_address_type(self):
+ """
+ Retrieve the load balancer's IP address type from the cached describe_load_balancers result
+
+ :return:
+ """
+
+ return self.elb.get('IpAddressType', None)
+
+ def update_elb_attributes(self):
+ """
+ Update the elb_attributes parameter
+ :return:
+ """
+ self.elb_attributes = self.get_elb_attributes()
+
+ def get_elb_tags(self):
+ """
+ Get load balancer tags
+
+ :return:
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(
+ self.connection.describe_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def delete_tags(self, tags_to_delete):
+ """
+ Delete elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.remove_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def modify_tags(self):
+ """
+ Modify elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.add_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def delete(self):
+ """
+ Delete elb
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.delete_load_balancer
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.wait_for_deletion(self.elb['LoadBalancerArn'])
+
+ self.changed = True
+
+ def compare_subnets(self):
+ """
+ Compare user subnets with current ELB subnets
+
+ :return: bool True if they match otherwise False
+ """
+
+ subnet_mapping_id_list = []
+ subnet_mappings = []
+
+ # Check if we're dealing with subnets or subnet_mappings
+ if self.subnets is not None:
+ # Convert subnets to subnet_mappings format for comparison
+ for subnet in self.subnets:
+ subnet_mappings.append({'SubnetId': subnet})
+
+ if self.subnet_mappings is not None:
+ # Use this directly since we're comparing as a mapping
+ subnet_mappings = self.subnet_mappings
+
+ # Build a subnet_mapping style structure of what's currently
+ # on the load balancer
+ for subnet in self.elb['AvailabilityZones']:
+ this_mapping = {'SubnetId': subnet['SubnetId']}
+ for address in subnet.get('LoadBalancerAddresses', []):
+ if 'AllocationId' in address:
+ this_mapping['AllocationId'] = address['AllocationId']
+ break
+
+ subnet_mapping_id_list.append(this_mapping)
+
+ return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
+
+ def modify_subnets(self):
+ """
+ Modify elb subnets to match module parameters
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_subnets
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def update(self):
+ """
+ Update the elb from AWS
+ :return:
+ """
+
+ self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
+ self.elb['tags'] = self.get_elb_tags()
+
+ def modify_ip_address_type(self, ip_addr_type):
+ """
+ Modify ELB ip address type
+ :return:
+ """
+ if ip_addr_type is None:
+ return
+ if self.elb_ip_addr_type == ip_addr_type:
+ return
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_ip_address_type
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+ self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type)
+
+ def _elb_create_params(self):
+ # Required parameters
+ params = dict()
+ params['Name'] = self.name
+ params['Type'] = self.type
+
+ # Other parameters
+ if self.elb_ip_addr_type is not None:
+ params['IpAddressType'] = self.elb_ip_addr_type
+ if self.subnets is not None:
+ params['Subnets'] = self.subnets
+ if self.subnet_mappings is not None:
+ params['SubnetMappings'] = self.subnet_mappings
+ if self.tags:
+ params['Tags'] = self.tags
+ # Scheme isn't supported for Gateway Load Balancers, so it is added by the
+ # ALB/NLB subclasses rather than here (we don't support GWLBs yet anyway).
+
+ return params
+
+ def create_elb(self):
+ """
+ Create a load balancer
+ :return:
+ """
+
+ params = self._elb_create_params()
+
+ try:
+ self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
+ self.changed = True
+ self.new_load_balancer = True
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.wait_for_status(self.elb['LoadBalancerArn'])
+
+
+class ApplicationLoadBalancer(ElasticLoadBalancerV2):
+
+ def __init__(self, connection, connection_ec2, module):
+ """
+
+ :param connection: boto3 connection
+ :param module: Ansible module
+ """
+ super(ApplicationLoadBalancer, self).__init__(connection, module)
+
+ self.connection_ec2 = connection_ec2
+
+ # Ansible module parameters specific to ALBs
+ self.type = 'application'
+ if module.params.get('security_groups') is not None:
+ try:
+ self.security_groups = AWSRetry.jittered_backoff()(
+ get_ec2_security_group_ids_from_names
+ )(module.params.get('security_groups'), self.connection_ec2, boto3=True)
+ except ValueError as e:
+ self.module.fail_json(msg=str(e), exception=traceback.format_exc())
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+ else:
+ self.security_groups = module.params.get('security_groups')
+ self.access_logs_enabled = module.params.get("access_logs_enabled")
+ self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
+ self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
+ self.idle_timeout = module.params.get("idle_timeout")
+ self.http2 = module.params.get("http2")
+ self.http_desync_mitigation_mode = module.params.get("http_desync_mitigation_mode")
+ self.http_drop_invalid_header_fields = module.params.get("http_drop_invalid_header_fields")
+ self.http_x_amzn_tls_version_and_cipher_suite = module.params.get("http_x_amzn_tls_version_and_cipher_suite")
+ self.http_xff_client_port = module.params.get("http_xff_client_port")
+ self.waf_fail_open = module.params.get("waf_fail_open")
+
+ if self.elb is not None and self.elb['Type'] != 'application':
+ self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")
+
+ def _elb_create_params(self):
+ params = super()._elb_create_params()
+
+ if self.security_groups is not None:
+ params['SecurityGroups'] = self.security_groups
+ params['Scheme'] = self.scheme
+
+ return params
+
+ def compare_elb_attributes(self):
+ """
+ Compare user attributes with current ELB attributes
+ :return: bool True if they match otherwise False
+ """
+
+ update_attributes = []
+ if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
+ update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
+ if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
+ update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
+ if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
+ update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
+ if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
+ update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+ if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
+ update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
+ if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
+ update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
+ if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \
+ self.elb_attributes['routing_http_desync_mitigation_mode']:
+ update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()})
+ if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \
+ self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']:
+ update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()})
+ if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \
+ self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']:
+ update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled',
+ 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()})
+ if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \
+ self.elb_attributes['routing_http_xff_client_port_enabled']:
+ update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()})
+ if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \
+ self.elb_attributes['waf_fail_open_enabled']:
+ update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()})
+
+ return not update_attributes
+
+ def modify_elb_attributes(self):
+ """
+ Update Application ELB attributes if required
+
+ :return:
+ """
+
+ update_attributes = []
+
+ if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
+ update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
+ if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
+ update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
+ if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
+ update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
+ if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
+ update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+ if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
+ update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
+ if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
+ update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
+ if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \
+ self.elb_attributes['routing_http_desync_mitigation_mode']:
+ update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()})
+ if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \
+ self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']:
+ update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()})
+ if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \
+ self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']:
+ update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled',
+ 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()})
+ if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \
+ self.elb_attributes['routing_http_xff_client_port_enabled']:
+ update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()})
+ if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \
+ self.elb_attributes['waf_fail_open_enabled']:
+ update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()})
+
+ if update_attributes:
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.modify_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ self.changed = True
+ except (BotoCoreError, ClientError) as e:
+ # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
+ if self.new_load_balancer:
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ self.module.fail_json_aws(e)
+
+ def compare_security_groups(self):
+ """
+ Compare user security groups with current ELB security groups
+
+ :return: bool True if they match otherwise False
+ """
+
+ return set(self.elb['SecurityGroups']) == set(self.security_groups)
+
+ def modify_security_groups(self):
+ """
+ Modify elb security groups to match module parameters
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_security_groups
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+
+class NetworkLoadBalancer(ElasticLoadBalancerV2):
+
+ def __init__(self, connection, connection_ec2, module):
+
+ """
+
+ :param connection: boto3 connection
+ :param module: Ansible module
+ """
+ super(NetworkLoadBalancer, self).__init__(connection, module)
+
+ self.connection_ec2 = connection_ec2
+
+ # Ansible module parameters specific to NLBs
+ self.type = 'network'
+ self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')
+
+ if self.elb is not None and self.elb['Type'] != 'network':
+ self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")
+
+ def _elb_create_params(self):
+ params = super()._elb_create_params()
+
+ params['Scheme'] = self.scheme
+
+ return params
+
+ def modify_elb_attributes(self):
+ """
+ Update Network ELB attributes if required
+
+ :return:
+ """
+
+ update_attributes = []
+
+ if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
+ self.elb_attributes['load_balancing_cross_zone_enabled']:
+ update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()})
+ if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
+ update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+
+ if update_attributes:
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.modify_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ self.changed = True
+ except (BotoCoreError, ClientError) as e:
+ # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
+ if self.new_load_balancer:
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ self.module.fail_json_aws(e)
+
+ def modify_subnets(self):
+ """
+ Modify elb subnets to match module parameters (unsupported for NLB)
+ :return:
+ """
+
+ self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
+
+
+class ELBListeners(object):
+
+ def __init__(self, connection, module, elb_arn):
+
+ self.connection = connection
+ self.module = module
+ self.elb_arn = elb_arn
+ listeners = module.params.get("listeners")
+ if listeners is not None:
+ # Remove suboption argspec defaults of None from each listener
+ listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
+ self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
+ self.current_listeners = self._get_elb_listeners()
+ self.purge_listeners = module.params.get("purge_listeners")
+ self.changed = False
+
+ def update(self):
+ """
+ Update the listeners for the ELB
+
+ :return:
+ """
+ self.current_listeners = self._get_elb_listeners()
+
+ def _get_elb_listeners(self):
+ """
+ Get ELB listeners
+
+ :return:
+ """
+
+ try:
+ listener_paginator = self.connection.get_paginator('describe_listeners')
+ return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def _ensure_listeners_default_action_has_arn(self, listeners):
+ """
+ If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and
+ replace the name.
+
+ :param listeners: a list of listener dicts
+ :return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed.
+ """
+
+ if not listeners:
+ listeners = []
+
+ fixed_listeners = []
+ for listener in listeners:
+ fixed_actions = []
+ for action in listener['DefaultActions']:
+ if 'TargetGroupName' in action:
+ action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
+ self.module,
+ action['TargetGroupName'])
+ del action['TargetGroupName']
+ fixed_actions.append(action)
+ listener['DefaultActions'] = fixed_actions
+ fixed_listeners.append(listener)
+
+ return fixed_listeners
+
+ def compare_listeners(self):
+ """
+
+ :return:
+ """
+ listeners_to_modify = []
+ listeners_to_delete = []
+ listeners_to_add = deepcopy(self.listeners)
+
+ # Check each current listener port to see if it's been passed to the module
+ for current_listener in self.current_listeners:
+ current_listener_passed_to_module = False
+ for new_listener in self.listeners[:]:
+ new_listener['Port'] = int(new_listener['Port'])
+ if current_listener['Port'] == new_listener['Port']:
+ current_listener_passed_to_module = True
+ # Remove what we match so that what is left can be marked as 'to be added'
+ listeners_to_add.remove(new_listener)
+ modified_listener = self._compare_listener(current_listener, new_listener)
+ if modified_listener:
+ modified_listener['Port'] = current_listener['Port']
+ modified_listener['ListenerArn'] = current_listener['ListenerArn']
+ listeners_to_modify.append(modified_listener)
+ break
+
+ # If the current listener was not matched against passed listeners and purge is True, mark for removal
+ if not current_listener_passed_to_module and self.purge_listeners:
+ listeners_to_delete.append(current_listener['ListenerArn'])
+
+ return listeners_to_add, listeners_to_modify, listeners_to_delete
+
+ def _compare_listener(self, current_listener, new_listener):
+ """
+ Compare two listeners.
+
+ :param current_listener:
+ :param new_listener:
+ :return:
+ """
+
+ modified_listener = {}
+
+ # Port
+ if current_listener['Port'] != new_listener['Port']:
+ modified_listener['Port'] = new_listener['Port']
+
+ # Protocol
+ if current_listener['Protocol'] != new_listener['Protocol']:
+ modified_listener['Protocol'] = new_listener['Protocol']
+
+ # If Protocol is HTTPS, check additional attributes
+ if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
+ # Cert
+ if current_listener['SslPolicy'] != new_listener['SslPolicy']:
+ modified_listener['SslPolicy'] = new_listener['SslPolicy']
+ if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
+ modified_listener['Certificates'] = []
+ modified_listener['Certificates'].append({})
+ modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
+ elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
+ modified_listener['SslPolicy'] = new_listener['SslPolicy']
+ modified_listener['Certificates'] = []
+ modified_listener['Certificates'].append({})
+ modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
+
+ # Default action
+
+ # Check proper rule format on current listener
+ if len(current_listener['DefaultActions']) > 1:
+ for action in current_listener['DefaultActions']:
+ if 'Order' not in action:
+ self.module.fail_json(msg="'Order' key not found in actions. "
+ "installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+
+ # If the lengths of the actions are the same, we'll have to verify that the
+ # contents of those actions are the same
+ if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
+ current_actions_sorted = _sort_actions(current_listener['DefaultActions'])
+ new_actions_sorted = _sort_actions(new_listener['DefaultActions'])
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
+ modified_listener['DefaultActions'] = new_listener['DefaultActions']
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_listener['DefaultActions'] = new_listener['DefaultActions']
+
+ if modified_listener:
+ return modified_listener
+ else:
+ return None
+
+
+class ELBListener(object):
+
+ def __init__(self, connection, module, listener, elb_arn):
+ """
+
+ :param connection:
+ :param module:
+ :param listener:
+ :param elb_arn:
+ """
+
+ self.connection = connection
+ self.module = module
+ self.listener = listener
+ self.elb_arn = elb_arn
+
+ def add(self):
+
+ try:
+ # Rules is not a valid parameter for create_listener
+ if 'Rules' in self.listener:
+ self.listener.pop('Rules')
+ AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ def modify(self):
+
+ try:
+ # Rules is not a valid parameter for modify_listener
+ if 'Rules' in self.listener:
+ self.listener.pop('Rules')
+ AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ def delete(self):
+
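+ # Note: for deletions, self.listener is expected to be the listener ARN (a string), not a listener dict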
+ try:
+ AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+
+class ELBListenerRules(object):
+
+ def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
+
+ self.connection = connection
+ self.module = module
+ self.elb_arn = elb_arn
+ self.rules = self._ensure_rules_action_has_arn(listener_rules)
+ self.changed = False
+
+ # Get listener based on port so we can use ARN
+ self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
+ self.listener_arn = self.current_listener['ListenerArn']
+ self.rules_to_add = deepcopy(self.rules)
+ self.rules_to_modify = []
+ self.rules_to_delete = []
+
+ # If the listener exists (i.e. has an ARN) get rules for the listener
+ if 'ListenerArn' in self.current_listener:
+ self.current_rules = self._get_elb_listener_rules()
+ else:
+ self.current_rules = []
+
+ def _ensure_rules_action_has_arn(self, rules):
+ """
+        If a rule Action has been passed with a Target Group Name instead of an ARN, look up the ARN
+        and replace the name.
+
+        :param rules: a list of rule dicts
+        :return: the same list of dicts, ensuring that each Actions dict has a TargetGroupArn key. Any TargetGroupName key is removed.
+ """
+
+ fixed_rules = []
+ for rule in rules:
+ fixed_actions = []
+ for action in rule['Actions']:
+ if 'TargetGroupName' in action:
+ action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
+ del action['TargetGroupName']
+ fixed_actions.append(action)
+ rule['Actions'] = fixed_actions
+ fixed_rules.append(rule)
+
+ return fixed_rules
+
+ def _get_elb_listener_rules(self):
+
+ try:
+ return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def _compare_condition(self, current_conditions, condition):
+ """
+
+ :param current_conditions:
+ :param condition:
+ :return:
+ """
+
+ condition_found = False
+
+ for current_condition in current_conditions:
+            # host-header: current_condition includes both HostHeaderConfig AND Values, while
+            # condition can be defined with either HostHeaderConfig OR Values. Only use the
+            # HostHeaderConfig['Values'] comparison if both conditions include HostHeaderConfig.
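+            # Illustrative shapes being compared:
+            #   current: {'Field': 'host-header', 'Values': ['x.example.com'],
+            #             'HostHeaderConfig': {'Values': ['x.example.com']}}
+            #   desired: {'Field': 'host-header', 'HostHeaderConfig': {'Values': ['x.example.com']}}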
+ if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])):
+ condition_found = True
+ break
+ elif current_condition.get('HttpHeaderConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and
+ current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']):
+ condition_found = True
+ break
+ elif current_condition.get('HttpRequestMethodConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])):
+ condition_found = True
+ break
+            # path-pattern: current_condition includes both PathPatternConfig AND Values, while
+            # condition can be defined with either PathPatternConfig OR Values. Only use the
+            # PathPatternConfig['Values'] comparison if both conditions include PathPatternConfig.
+ elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])):
+ condition_found = True
+ break
+ elif current_condition.get('QueryStringConfig'):
+ # QueryString Values is not sorted as it is the only list of dicts (not strings).
+ if (current_condition['Field'] == condition['Field'] and
+ current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']):
+ condition_found = True
+ break
+ elif current_condition.get('SourceIpConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])):
+ condition_found = True
+ break
+            # Not all fields are required to have their Values list nested within a *Config dict,
+            # e.g. the host-header/path-pattern fields can list their Values directly.
+ elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
+ condition_found = True
+ break
+
+ return condition_found
+
+ def _compare_rule(self, current_rule, new_rule):
+ """
+
+ :return:
+ """
+
+ modified_rule = {}
+
+ # Priority
+ if int(current_rule['Priority']) != int(new_rule['Priority']):
+ modified_rule['Priority'] = new_rule['Priority']
+
+ # Actions
+
+ # Check proper rule format on current listener
+ if len(current_rule['Actions']) > 1:
+ for action in current_rule['Actions']:
+ if 'Order' not in action:
+ self.module.fail_json(msg="'Order' key not found in actions. "
+ "installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+
+ # If the lengths of the actions are the same, we'll have to verify that the
+ # contents of those actions are the same
+ if len(current_rule['Actions']) == len(new_rule['Actions']):
+            # Compare the sorted action contents and update if they differ
+ current_actions_sorted = _sort_actions(current_rule['Actions'])
+ new_actions_sorted = _sort_actions(new_rule['Actions'])
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
+ modified_rule['Actions'] = new_rule['Actions']
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_rule['Actions'] = new_rule['Actions']
+
+ # Conditions
+ modified_conditions = []
+ for condition in new_rule['Conditions']:
+ if not self._compare_condition(current_rule['Conditions'], condition):
+ modified_conditions.append(condition)
+
+ if modified_conditions:
+ modified_rule['Conditions'] = modified_conditions
+
+ return modified_rule
+
+ def compare_rules(self):
+ """
+
+ :return:
+ """
+
+ rules_to_modify = []
+ rules_to_delete = []
+ rules_to_add = deepcopy(self.rules)
+
+ for current_rule in self.current_rules:
+ current_rule_passed_to_module = False
+ for new_rule in self.rules[:]:
+ if current_rule['Priority'] == str(new_rule['Priority']):
+ current_rule_passed_to_module = True
+ # Remove what we match so that what is left can be marked as 'to be added'
+ rules_to_add.remove(new_rule)
+ modified_rule = self._compare_rule(current_rule, new_rule)
+ if modified_rule:
+ modified_rule['Priority'] = int(current_rule['Priority'])
+ modified_rule['RuleArn'] = current_rule['RuleArn']
+ modified_rule['Actions'] = new_rule['Actions']
+ modified_rule['Conditions'] = new_rule['Conditions']
+ rules_to_modify.append(modified_rule)
+ break
+
+ # If the current rule was not matched against passed rules, mark for removal
+ if not current_rule_passed_to_module and not current_rule['IsDefault']:
+ rules_to_delete.append(current_rule['RuleArn'])
+
+ return rules_to_add, rules_to_modify, rules_to_delete
+
+
+class ELBListenerRule(object):
+
+ def __init__(self, connection, module, rule, listener_arn):
+
+ self.connection = connection
+ self.module = module
+ self.rule = rule
+ self.listener_arn = listener_arn
+ self.changed = False
+
+ def create(self):
+ """
+ Create a listener rule
+ """
+
+ try:
+ self.rule['ListenerArn'] = self.listener_arn
+ self.rule['Priority'] = int(self.rule['Priority'])
+ AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def modify(self):
+ """
+ Modify a listener rule
+ """
+
+ try:
+ del self.rule['Priority']
+ AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def delete(self):
+ """
+ Delete a listener rule
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
new file mode 100644
index 00000000..6ebed23b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+
+from .ec2 import AWSRetry
+from .core import is_boto3_error_code
+from .core import parse_aws_arn
+
+
+def get_aws_account_id(module):
+ """ Given an AnsibleAWSModule instance, get the active AWS account ID
+ """
+
+ return get_aws_account_info(module)[0]
+
+
+def get_aws_account_info(module):
+ """Given an AnsibleAWSModule instance, return the account information
+ (account id and partition) we are currently working on
+
+    get_aws_account_info tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy, so we try several
+    different ways. Giving either IAM or STS privileges to the account
+    should be enough to permit this.
+
+ Tries:
+ - sts:GetCallerIdentity
+ - iam:GetUser
+ - sts:DecodeAuthorizationMessage
+ """
+ account_id = None
+ partition = None
+ try:
+ sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+ caller_id = sts_client.get_caller_identity(aws_retry=True)
+ account_id = caller_id.get('Account')
+ partition = caller_id.get('Arn').split(':')[1]
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
+ try:
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
+ except is_boto3_error_code('AccessDenied') as e:
+ try:
+ except_msg = to_native(e.message)
+ except AttributeError:
+ except_msg = to_native(e)
+ result = parse_aws_arn(except_msg)
+ if result is None or result['service'] != 'iam':
+ module.fail_json_aws(
+ e,
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+ account_id = result.get('account_id')
+ partition = result.get('partition')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+
+ if account_id is None or partition is None:
+ module.fail_json(
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+
+ return (to_native(account_id), to_native(partition))
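+
+
+# Illustrative usage (sketch; 'module' is an AnsibleAWSModule instance):
+#
+#     account_id, partition = get_aws_account_info(module)
+#     role_arn = 'arn:{0}:iam::{1}:role/example-role'.format(partition, account_id)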
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
new file mode 100644
index 00000000..39a207a9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -0,0 +1,451 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+                              mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make AWSRetry easier to use, it can be wrapped around any call from a
+module-created client. To add retries to a client, create the client with a
+retry decorator:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import wraps
+import logging
+import os
+import re
+import traceback
+
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils._text import to_native
+
+from .botocore import HAS_BOTO3
+from .botocore import boto3_conn
+from .botocore import get_aws_connection_info
+from .botocore import get_aws_region
+from .botocore import gather_sdk_versions
+
+from .version import LooseVersion
+
+# Currently only AnsibleAWSModule. However, we have a lot of copy-and-paste code
+# for inventory and lookup modules which we should refactor.
+
+
+class AnsibleAWSModule(object):
+ """An ansible module class for AWS modules
+
+    AnsibleAWSModule provides a class for building modules which
+    connect to Amazon Web Services. The interface is currently more
+    restricted than the basic module class, with the aim that later the
+    basic module class can be reduced. If you find that any key
+    feature is missing, please contact the author/Ansible AWS team
+ (available on #ansible-aws on IRC) to request the additional
+ features needed.
+ """
+ default_settings = {
+ "default_args": True,
+ "check_boto3": True,
+ "auto_retry": True,
+ "module_class": AnsibleModule
+ }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+ argument_spec_full = aws_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"]:
+ if not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore and boto3'))
+ if not self.botocore_at_least('1.21.0'):
+ self.warn('botocore < 1.21.0 is not supported or tested.'
+ ' Some features may not work.')
+ if not self.boto3_at_least("1.18.0"):
+ self.warn('boto3 < 1.18.0 is not supported or tested.'
+ ' Some features may not work.')
+
+ deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY',
+ 'EC2_URL', 'S3_URL'}
+ if deprecated_vars.intersection(set(os.environ.keys())):
+ self._module.deprecate(
+ "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', "
+ "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment "
+ "variables has been deprecated. "
+ "These variables are currently used for all AWS services which can "
+ "cause confusion. We recomend using the relevant module "
+ "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', "
+ "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' "
+ "environment variables can be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ if 'AWS_SECURITY_TOKEN' in os.environ.keys():
+ self._module.deprecate(
+ "Support for the 'AWS_SECURITY_TOKEN' environment variable "
+ "has been deprecated. This variable was based on the original "
+ "boto SDK, support for which has now been dropped. "
+ "We recommend using the 'session_token' module parameter "
+ "or alternatively the 'AWS_SESSION_TOKEN' environment variable "
+ "can be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
+ @property
+ def params(self):
+ return self._module.params
+
+ def _get_resource_action_list(self):
+ actions = []
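+        # Each botocore.endpoint DEBUG line being parsed looks roughly like
+        # (illustrative): "Making request for OperationModel(name=DescribeInstances)
+        # ... 'url': 'https://ec2.us-east-1.amazonaws.com/' ..."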
+ for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ ln = ln.strip()
+ if not ln:
+ continue
+ found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
+ if found_operational_request:
+ operation_request = found_operational_request.group(0)[20:-1]
+ resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
+ actions.append("{0}:{1}".format(resource, operation_request))
+ return list(set(actions))
+
+ def exit_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.exit_json(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.fail_json(*args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ return self._module.debug(*args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ return self._module.warn(*args, **kwargs)
+
+ def deprecate(self, *args, **kwargs):
+ return self._module.deprecate(*args, **kwargs)
+
+ def boolean(self, *args, **kwargs):
+ return self._module.boolean(*args, **kwargs)
+
+ def md5(self, *args, **kwargs):
+ return self._module.md5(*args, **kwargs)
+
+ def client(self, service, retry_decorator=None):
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ conn = boto3_conn(self, conn_type='client', resource=service,
+ region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+ return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
+ def resource(self, service):
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ return boto3_conn(self, conn_type='resource', resource=service,
+ region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+
+ @property
+ def region(self):
+ return get_aws_region(self, True)
+
+ def fail_json_aws(self, exception, msg=None, **kwargs):
+ """call fail_json with processed exception
+
+ function for converting exceptions thrown by AWS SDK modules,
+ botocore, boto3 and boto, into nice error messages.
+ """
+ last_traceback = traceback.format_exc()
+
+ # to_native is trusted to handle exceptions that str() could
+ # convert to text.
+ try:
+ except_msg = to_native(exception.message)
+ except AttributeError:
+ except_msg = to_native(exception)
+
+ if msg is not None:
+ message = '{0}: {1}'.format(msg, except_msg)
+ else:
+ message = except_msg
+
+ try:
+ response = exception.response
+ except AttributeError:
+ response = None
+
+ failure = dict(
+ msg=message,
+ exception=last_traceback,
+ **self._gather_versions()
+ )
+
+ failure.update(kwargs)
+
+ if response is not None:
+ failure.update(**camel_dict_to_snake_dict(response))
+
+ self.fail_json(**failure)
+
+ def _gather_versions(self):
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+ Returns {'boto3_version': str, 'botocore_version': str}
+ Returns {} if either is not installed
+ """
+ return gather_sdk_versions()
+
+ def require_boto3_at_least(self, desired, **kwargs):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ calls fail_json() when the boto3 version is less than the desired
+ version
+
+ Usage:
+ module.require_boto3_at_least("1.2.3", reason="to update tags")
+ module.require_boto3_at_least("1.1.1")
+
+        :param desired: the minimum desired version
+        :param reason: why the version is required (optional)
+ """
+ if not self.boto3_at_least(desired):
+ self._module.fail_json(
+ msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs),
+ **self._gather_versions()
+ )
+
+ def boto3_at_least(self, desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
+ def require_botocore_at_least(self, desired, **kwargs):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ calls fail_json() when the botocore version is less than the desired
+ version
+
+ Usage:
+ module.require_botocore_at_least("1.2.3", reason="to update tags")
+ module.require_botocore_at_least("1.1.1")
+
+        :param desired: the minimum desired version
+        :param reason: why the version is required (optional)
+ """
+ if not self.botocore_at_least(desired):
+ self._module.fail_json(
+ msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs),
+ **self._gather_versions()
+ )
+
+ def botocore_at_least(self, desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
+class _RetryingBotoClientWrapper(object):
+ __never_wait = (
+ 'get_paginator', 'can_paginate',
+ 'get_waiter', 'generate_presigned_url',
+ )
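+    # These client attributes return helpers (paginators, waiters, presigned
+    # URLs) rather than making a single retryable API call, so they are
+    # returned unwrapped.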
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(aws_retry=False, *args, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
+
+
+def _aws_common_argument_spec():
+ """
+    This does not include 'region' as some AWS APIs don't require a
+    region. However, omitting the region is not recommended, as it means
+    module_defaults can't include the region parameter.
+ """
+ return dict(
+ access_key=dict(
+ aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'],
+ deprecated_aliases=[
+ dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'),
+ ],
+ fallback=(env_fallback, ['AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY', 'EC2_ACCESS_KEY']),
+ no_log=False,
+ ),
+ secret_key=dict(
+ aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'],
+ deprecated_aliases=[
+ dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'),
+ ],
+ fallback=(env_fallback, ['AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_KEY', 'EC2_SECRET_KEY']),
+ no_log=True,
+ ),
+ session_token=dict(
+ aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'],
+ deprecated_aliases=[
+ dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'),
+ ],
+ fallback=(env_fallback, ['AWS_SESSION_TOKEN', 'AWS_SECURITY_TOKEN', 'EC2_SECURITY_TOKEN']),
+ no_log=True,
+ ),
+ profile=dict(
+ aliases=['aws_profile'],
+ fallback=(env_fallback, ['AWS_PROFILE', 'AWS_DEFAULT_PROFILE']),
+ ),
+
+ endpoint_url=dict(
+ aliases=['aws_endpoint_url', 'ec2_url', 's3_url'],
+ deprecated_aliases=[
+ dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'),
+ ],
+ fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']),
+ ),
+ validate_certs=dict(
+ type='bool',
+ default=True,
+ ),
+ aws_ca_bundle=dict(
+ type='path',
+ fallback=(env_fallback, ['AWS_CA_BUNDLE']),
+ ),
+ aws_config=dict(
+ type='dict',
+ ),
+ debug_botocore_endpoint_logs=dict(
+ type='bool',
+ default=False,
+ fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']),
+ ),
+ )
+
+
+def aws_argument_spec():
+ """
+ Returns a dictionary containing the argument_spec common to all AWS modules.
+ """
+ region_spec = dict(
+ region=dict(
+ aliases=['aws_region', 'ec2_region'],
+ deprecated_aliases=[
+ dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'),
+ ],
+ fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']),
+ ),
+ )
+ spec = _aws_common_argument_spec()
+ spec.update(region_spec)
+ return spec
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
new file mode 100644
index 00000000..4aeabd5f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -0,0 +1,179 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import cmp_to_key
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import binary_type
+from ansible.module_utils.six import string_types
+
+
+def _hashable_policy(policy, policy_list):
+ """
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', ('s3:PutObjectAcl',)),
+ ('Effect', ('Allow',)),
+ ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))),
+ ('Version', ('2012-10-17',)))]
+
+ """
+ # Amazon will automatically convert bool and int to strings for us
+ if isinstance(policy, bool):
+ return tuple([str(policy).lower()])
+ elif isinstance(policy, int):
+ return tuple([str(policy)])
+
+ if isinstance(policy, list):
+ for each in policy:
+ tupleified = _hashable_policy(each, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append(tupleified)
+ elif isinstance(policy, string_types) or isinstance(policy, binary_type):
+ policy = to_text(policy)
+ # convert root account ARNs to just account IDs
+ if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
+ policy = policy.split(':')[4]
+ return [policy]
+ elif isinstance(policy, dict):
+ sorted_keys = list(policy.keys())
+ sorted_keys.sort()
+ for key in sorted_keys:
+ element = policy[key]
+ # Special case defined in
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
+ if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
+ element = {"AWS": "*"}
+ tupleified = _hashable_policy(element, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append((key, tupleified))
+
+ # ensure we aren't returning deeply nested structures of length 1
+ if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
+ policy_list = policy_list[0]
+ if isinstance(policy_list, list):
+ policy_list.sort(key=cmp_to_key(_py3cmp))
+ return policy_list
+
+
+def _py3cmp(a, b):
+ """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ try:
+ if a > b:
+ return 1
+ elif a < b:
+ return -1
+ else:
+ return 0
+ except TypeError as e:
+ # check to see if they're tuple-string
+ # always say strings are less than tuples (to maintain compatibility with python2)
+ str_ind = to_text(e).find('str')
+ tup_ind = to_text(e).find('tuple')
+ if -1 not in (str_ind, tup_ind):
+ if str_ind < tup_ind:
+ return -1
+ elif tup_ind < str_ind:
+ return 1
+ raise
+
+
+def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
+ """ Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
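+
+    Example (illustrative):
+        >>> compare_policies({'Statement': []},
+        ...                  {'Statement': [], 'Version': '2008-10-17'})
+        False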
+ """
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
+ if default_version:
+ if isinstance(current_policy, dict):
+ current_policy = current_policy.copy()
+ current_policy.setdefault("Version", default_version)
+ if isinstance(new_policy, dict):
+ new_policy = new_policy.copy()
+ new_policy.setdefault("Version", default_version)
+
+ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
+
+
+def sort_json_policy_dict(policy_dict):
+
+ """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ different orders will return true
+ Args:
+ policy_dict (dict): Dict representing IAM JSON policy.
+ Basic Usage:
+        >>> my_iam_policy = {'Principal': {'AWS': ["31", "7", "14", "101"]}}
+ >>> sort_json_policy_dict(my_iam_policy)
+    Returns:
+        Dict: A copy of the policy with any lists sorted
+ {
+            'Principal': {
+ 'AWS': [ '7', '14', '31', '101' ]
+ }
+ }
+ """
+
+ def value_is_list(my_list):
+
+ checked_list = []
+ for item in my_list:
+ if isinstance(item, dict):
+ checked_list.append(sort_json_policy_dict(item))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+ # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+ checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+ return checked_list
+
+ ordered_policy_dict = {}
+ for key, value in policy_dict.items():
+ if isinstance(value, dict):
+ ordered_policy_dict[key] = sort_json_policy_dict(value)
+ elif isinstance(value, list):
+ ordered_policy_dict[key] = value_is_list(value)
+ else:
+ ordered_policy_dict[key] = value
+
+ return ordered_policy_dict
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
new file mode 100644
index 00000000..71981464
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -0,0 +1,387 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import namedtuple
+from time import sleep
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import compare_aws_tags
+from .waiters import get_waiter
+
+Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes'])
+# Whitelist boto3 client methods for cluster and instance resources
+cluster_method_names = [
+ 'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3',
+ 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
+ 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
+]
+instance_method_names = [
+ 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
+ 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
+ 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
+ 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance',
+ 'remove_role_from_db_instance'
+]
+
+cluster_snapshot_method_names = [
+ 'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
+ 'list_tags_for_resource', 'copy_db_cluster_snapshot'
+]
+
+instance_snapshot_method_names = [
+ 'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
+ 'copy_db_snapshot', 'list_tags_for_resource'
+]
+
+
+def get_rds_method_attribute(method_name, module):
+ '''
+ Returns rds attributes of the specified method.
+
+ Parameters:
+ method_name (str): RDS method to call
+ module: AnsibleAWSModule
+
+ Returns:
+            Boto3ClientMethod (namedtuple):
+ name (str): Name of method
+ waiter (str): Name of waiter associated with given method
+ operation_description (str): Description of method
+ resource (str): Type of resource this method applies to
+ One of ['instance', 'cluster', 'instance_snapshot', 'cluster_snapshot']
+ retry_codes (list): List of extra error codes to retry on
+
+ Raises:
+            NotImplementedError: if wait is True but no waiter can be found for the specified method
+ '''
+ waiter = ''
+ readable_op = method_name.replace('_', ' ').replace('db', 'DB')
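+    # e.g. 'restore_db_instance_from_db_snapshot' -> 'restore DB instance from DB snapshot'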
+ resource = ''
+ retry_codes = []
+ if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
+ resource = 'cluster'
+ if method_name == 'delete_db_cluster':
+ waiter = 'cluster_deleted'
+ else:
+ waiter = 'cluster_available'
+ # Handle retry codes
+ if method_name == 'restore_db_cluster_from_snapshot':
+ retry_codes = ['InvalidDBClusterSnapshotState']
+ else:
+ retry_codes = ['InvalidDBClusterState']
+ elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
+ resource = 'instance'
+ if method_name == 'delete_db_instance':
+ waiter = 'db_instance_deleted'
+ elif method_name == 'stop_db_instance':
+ waiter = 'db_instance_stopped'
+ elif method_name == 'add_role_to_db_instance':
+ waiter = 'role_associated'
+ elif method_name == 'remove_role_from_db_instance':
+ waiter = 'role_disassociated'
+ elif method_name == 'promote_read_replica':
+ waiter = 'read_replica_promoted'
+ else:
+ waiter = 'db_instance_available'
+ # Handle retry codes
+ if method_name == 'restore_db_instance_from_db_snapshot':
+ retry_codes = ['InvalidDBSnapshotState']
+ else:
+ retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
+ elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params:
+ resource = 'cluster_snapshot'
+ if method_name == 'delete_db_cluster_snapshot':
+ waiter = 'db_cluster_snapshot_deleted'
+ retry_codes = ['InvalidDBClusterSnapshotState']
+ elif method_name == 'create_db_cluster_snapshot':
+ waiter = 'db_cluster_snapshot_available'
+ retry_codes = ['InvalidDBClusterState']
+ else:
+ # Tagging
+ waiter = 'db_cluster_snapshot_available'
+ retry_codes = ['InvalidDBClusterSnapshotState']
+ elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params:
+ resource = 'instance_snapshot'
+ if method_name == 'delete_db_snapshot':
+ waiter = 'db_snapshot_deleted'
+ retry_codes = ['InvalidDBSnapshotState']
+ elif method_name == 'create_db_snapshot':
+ waiter = 'db_snapshot_available'
+ retry_codes = ['InvalidDBInstanceState']
+ else:
+ # Tagging
+ waiter = 'db_snapshot_available'
+ retry_codes = ['InvalidDBSnapshotState']
+ else:
+ if module.params.get('wait'):
+ raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))
+
+ return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op,
+ resource=resource, retry_codes=retry_codes)
+
+
+def get_final_identifier(method_name, module):
+ updated_identifier = None
+ apply_immediately = module.params.get('apply_immediately')
+ resource = get_rds_method_attribute(method_name, module).resource
+ if resource == 'cluster':
+ identifier = module.params['db_cluster_identifier']
+ updated_identifier = module.params['new_db_cluster_identifier']
+ elif resource == 'instance':
+ identifier = module.params['db_instance_identifier']
+ updated_identifier = module.params['new_db_instance_identifier']
+ elif resource == 'instance_snapshot':
+ identifier = module.params['db_snapshot_identifier']
+ elif resource == 'cluster_snapshot':
+ identifier = module.params['db_cluster_snapshot_identifier']
+ else:
+ raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))
+ if not module.check_mode and updated_identifier and apply_immediately:
+ identifier = updated_identifier
+ return identifier
+
+
+def handle_errors(module, exception, method_name, parameters):
+
+ if not isinstance(exception, ClientError):
+ module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
+
+ changed = True
+ error_code = exception.response['Error']['Code']
+ if (
+ method_name in ('modify_db_instance', 'modify_db_cluster') and
+ error_code == 'InvalidParameterCombination'
+ ):
+ if 'No modifications were requested' in to_text(exception):
+ changed = False
+ elif 'ModifyDbCluster API' in to_text(exception):
+ module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
+ if 'DB Instance is not a read replica' in to_text(exception):
+ changed = False
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault':
+ if 'DB Cluster that is not a read replica' in to_text(exception):
+ changed = False
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ elif method_name == 'create_db_cluster' and error_code == 'InvalidParameterValue':
+ accepted_engines = [
+ 'aurora', 'aurora-mysql', 'aurora-postgresql'
+ ]
+ if parameters.get('Engine') not in accepted_engines:
+ module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+
+ return changed
+
+
+def call_method(client, module, method_name, parameters):
+ result = {}
+ changed = True
+ if not module.check_mode:
+ wait = module.params.get('wait')
+ retry_codes = get_rds_method_attribute(method_name, module).retry_codes
+ method = getattr(client, method_name)
+ try:
+ result = AWSRetry.jittered_backoff(catch_extra_error_codes=retry_codes)(method)(**parameters)
+ except (BotoCoreError, ClientError) as e:
+ changed = handle_errors(module, e, method_name, parameters)
+
+ if wait and changed:
+ identifier = get_final_identifier(method_name, module)
+ wait_for_status(client, module, identifier, method_name)
+ return result, changed
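+
+# Illustrative call (sketch; 'client' and 'module' are assumed to exist and the
+# parameters follow the boto3 RDS API):
+#
+#     result, changed = call_method(client, module, 'stop_db_instance',
+#                                   {'DBInstanceIdentifier': 'example-db'})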
+
+
+def wait_for_instance_status(client, module, db_instance_id, waiter_name):
+ def wait(client, db_instance_id, waiter_name):
+ try:
+ waiter = client.get_waiter(waiter_name)
+ except ValueError:
+            # Fall back to a custom waiter defined in module_utils/waiters.py
+ waiter = get_waiter(client, waiter_name)
+ waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
+
+ waiter_expected_status = {
+ 'db_instance_deleted': 'deleted',
+ 'db_instance_stopped': 'stopped',
+ }
+ expected_status = waiter_expected_status.get(waiter_name, 'available')
+ for _wait_attempts in range(0, 10):
+ try:
+ wait(client, db_instance_id, waiter_name)
+ break
+ except WaiterError as e:
+ # Instance may be renamed and AWSRetry doesn't handle WaiterError
+ if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
+ sleep(10)
+ continue
+ module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
+ db_instance_id, expected_status)
+ )
+
+
+def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
+ try:
+ get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
+ except WaiterError as e:
+ if waiter_name == 'cluster_deleted':
+ msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
+ else:
+ msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
+ module.fail_json_aws(e, msg=msg)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
+
+
+def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name):
+ try:
+ client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id)
+ except WaiterError as e:
+ if waiter_name == 'db_snapshot_deleted':
+ msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id)
+ else:
+ msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id)
+ module.fail_json_aws(e, msg=msg)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id))
+
+
+def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name):
+ try:
+ client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id)
+ except WaiterError as e:
+ if waiter_name == 'db_cluster_snapshot_deleted':
+ msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id)
+ else:
+ msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id)
+ module.fail_json_aws(e, msg=msg)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id))
+
+
+def wait_for_status(client, module, identifier, method_name):
+ rds_method_attributes = get_rds_method_attribute(method_name, module)
+ waiter_name = rds_method_attributes.waiter
+ resource = rds_method_attributes.resource
+
+ if resource == 'cluster':
+ wait_for_cluster_status(client, module, identifier, waiter_name)
+ elif resource == 'instance':
+ wait_for_instance_status(client, module, identifier, waiter_name)
+ elif resource == 'instance_snapshot':
+ wait_for_instance_snapshot_status(client, module, identifier, waiter_name)
+ elif resource == 'cluster_snapshot':
+ wait_for_cluster_snapshot_status(client, module, identifier, waiter_name)
+
+
+def get_tags(client, module, resource_arn):
+ try:
+ return boto3_tag_list_to_ansible_dict(
+ client.list_tags_for_resource(ResourceName=resource_arn)['TagList']
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe tags")
+
+
+def arg_spec_to_rds_params(options_dict):
+ tags = options_dict.pop('tags')
+ has_processor_features = False
+ if 'processor_features' in options_dict:
+ has_processor_features = True
+ processor_features = options_dict.pop('processor_features')
+ camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
+ for key in list(camel_options.keys()):
+ for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
+ if old in key:
+ camel_options[key.replace(old, new)] = camel_options.pop(key)
+ camel_options['Tags'] = tags
+ if has_processor_features:
+ camel_options['ProcessorFeatures'] = processor_features
+ return camel_options
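+
+# Illustrative conversion (sketch):
+#   {'db_instance_identifier': 'example-db', 'tags': {'Env': 'dev'}}
+# becomes, after the Db/Iam/Az fix-ups above:
+#   {'DBInstanceIdentifier': 'example-db', 'Tags': {'Env': 'dev'}}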
+
+
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
+ if tags is None:
+ return False
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
+ changed = bool(tags_to_add or tags_to_remove)
+ if tags_to_add:
+ call_method(
+ client, module, method_name='add_tags_to_resource',
+ parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
+ )
+ if tags_to_remove:
+ call_method(
+ client, module, method_name='remove_tags_from_resource',
+ parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
+ )
+ return changed
+
+
+def compare_iam_roles(existing_roles, target_roles, purge_roles):
+ '''
+ Returns differences between target and existing IAM roles
+
+ Parameters:
+ existing_roles (list): Existing IAM roles
+ target_roles (list): Target IAM roles
+ purge_roles (bool): Remove roles not in target_roles if True
+
+ Returns:
+ roles_to_add (list): List of IAM roles to add
+        roles_to_remove (list): List of IAM roles to remove
+ '''
+ existing_roles = [dict((k, v) for k, v in role.items() if k != 'status') for role in existing_roles]
+ roles_to_add = [role for role in target_roles if role not in existing_roles]
+ roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else []
+ return roles_to_add, roles_to_remove
+
+
+def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove):
+ '''
+ Update a DB instance's associated IAM roles
+
+ Parameters:
+ client: RDS client
+ module: AnsibleAWSModule
+ instance_id (str): DB's instance ID
+ roles_to_add (list): List of IAM roles to add
+        roles_to_remove (list): List of IAM roles to remove
+
+ Returns:
+ changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not
+ '''
+ for role in roles_to_remove:
+ params = {'DBInstanceIdentifier': instance_id,
+ 'RoleArn': role['role_arn'],
+ 'FeatureName': role['feature_name']}
+ _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params)
+ for role in roles_to_add:
+ params = {'DBInstanceIdentifier': instance_id,
+ 'RoleArn': role['role_arn'],
+ 'FeatureName': role['feature_name']}
+ _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params)
+ return changed
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
new file mode 100644
index 00000000..1bd214b6
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
@@ -0,0 +1,78 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from .cloud import CloudRetry
+
+
+def _botocore_exception_maybe():
+ """
+ Allow for boto3 not being installed when using these utils by wrapping
+ botocore.exceptions instead of assigning from it directly.
+ """
+ if HAS_BOTO3:
+ return ClientError
+ return type(None)
+
+
+class AWSRetry(CloudRetry):
+ base_class = _botocore_exception_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.response['Error']['Code']
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This list of failures is based on this API Reference
+ # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+ #
+        # TooManyRequestsException comes from inside botocore when it
+        # does its own retries; unfortunately it does not retry long
+        # enough to allow some services, such as API Gateway, to
+        # complete configuration. At the time of writing there is a
+        # botocore/boto3 bug open to fix this.
+ #
+ # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+ retry_on = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling'
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
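+
+
+# Illustrative usage (sketch): wrap a throttle-prone call so the codes listed
+# above are retried with jittered exponential backoff.
+#
+#     @AWSRetry.jittered_backoff(retries=10)
+#     def describe_instances(client, **params):
+#         return client.describe_instances(**params)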
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
new file mode 100644
index 00000000..3e2940a5
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
@@ -0,0 +1,64 @@
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags):
+ if new_tags is None:
+ return False
+
+ old_tags = get_tags(module, client, resource_type, resource_id)
+ tags_to_set, tags_to_delete = compare_aws_tags(old_tags, new_tags, purge_tags=purge_tags)
+
+ change_params = dict()
+ if tags_to_set:
+ change_params['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set)
+ if tags_to_delete:
+ change_params['RemoveTagKeys'] = tags_to_delete
+
+ if not change_params:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ client.change_tags_for_resource(
+ ResourceType=resource_type,
+ ResourceId=resource_id,
+ **change_params
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type),
+ resource_id=resource_id, change_params=change_params)
+ return True
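+
+# Illustrative call (sketch): Route 53 tagging takes a resource type of
+# 'healthcheck' or 'hostedzone', e.g.
+#
+#     manage_tags(module, client, 'hostedzone', zone_id, {'Env': 'dev'}, True)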
+
+
+def get_tags(module, client, resource_type, resource_id):
+ try:
+ tagset = client.list_tags_for_resource(
+ ResourceType=resource_type,
+ ResourceId=resource_id,
+ )
+ except is_boto3_error_code('NoSuchHealthCheck'):
+ return {}
+ except is_boto3_error_code('NoSuchHostedZone'): # pylint: disable=duplicate-except
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type),
+ resource_id=resource_id)
+
+ tags = boto3_tag_list_to_ansible_dict(tagset['ResourceTagSet']['Tags'])
+ return tags
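+
+
+# Illustrative call (a sketch; assumes a Route 53 client and an AnsibleAWSModule
+# named `module`, and a hosted zone id in `zone_id`):
+#
+#   changed = manage_tags(module, client, 'hostedzone', zone_id,
+#                         new_tags={'env': 'prod'}, purge_tags=True)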
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
new file mode 100644
index 00000000..c13c91f2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+import string
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ with open(filename, 'rb') as f:
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ digests.append(md5(f.read(int(head['ContentLength']))))
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(module.md5(filename))
+
+
+def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+ offset = 0
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ length = int(head['ContentLength'])
+ digests.append(md5(content[offset:offset + length]))
+ offset += length
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(md5(content).hexdigest())
+
+
+def validate_bucket_name(module, name):
+ # See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+ if len(name) < 3:
+ module.fail_json(msg='the length of an S3 bucket name must be at least 3 characters')
+ if len(name) > 63:
+ module.fail_json(msg='the length of an S3 bucket name cannot exceed 63 characters')
+
+ legal_characters = string.ascii_lowercase + ".-" + string.digits
+ illegal_characters = [c for c in name if c not in legal_characters]
+ if illegal_characters:
+ module.fail_json(msg='invalid character(s) found in the bucket name')
+ if name[0] not in string.ascii_lowercase + string.digits or name[-1] not in string.ascii_lowercase + string.digits:
+ module.fail_json(msg='bucket names must begin and end with a letter or number')
+ return True
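+
+
+# Illustrative behaviour (assumes `module` is an AnsibleAWSModule):
+#
+#   validate_bucket_name(module, 'my-bucket-2018')  # returns True
+#   validate_bucket_name(module, 'MyBucket')        # fails: uppercase is illegal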
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
new file mode 100644
index 00000000..1568e488
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
@@ -0,0 +1,181 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {
+ 'MyTagKey': 'MyTagValue'
+ }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ # minio seems to return [{}] as an empty tags_list
+ if not tags_list or not any(tag for tag in tags_list):
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+ """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> ansible_dict_to_boto3_tag_list(tags_dict)
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ Returns:
+ List: List of dicts containing tag keys and values
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ """
+
+ if not tags_dict:
+ return []
+
+ tags_list = []
+ for k, v in tags_dict.items():
+ tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+ return tags_list
+
+
+def boto3_tag_specifications(tags_dict, types=None):
+ """ Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
+ resource tags to a TagSpecification object.
+
+ https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html
+
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ types (list): A list of resource types to be tagged.
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> boto3_tag_specifications(tags_dict, ['instance'])
+ [
+ {
+ 'ResourceType': 'instance',
+ 'Tags': [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ }
+ ]
+ Returns:
+ List: List of dictionaries representing an AWS Tag Specification
+ """
+ if not tags_dict:
+ return None
+ specifications = list()
+ tag_list = ansible_dict_to_boto3_tag_list(tags_dict)
+
+ if not types:
+ specifications.append(dict(Tags=tag_list))
+ return specifications
+
+ if isinstance(types, string_types):
+ types = [types]
+
+ for type_name in types:
+ specifications.append(dict(ResourceType=type_name, Tags=tag_list))
+
+ return specifications
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+ """
+ Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+ Two values are returned - the first is a dict of tags to be set, the second is a list of tag keys to remove. Since the AWS APIs
+ differ, these may not be usable out of the box.
+
+ :param current_tags_dict:
+ :param new_tags_dict:
+ :param purge_tags:
+ :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+ :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
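+
+ Basic Usage (illustrative):
+ >>> compare_aws_tags({'Name': 'existing', 'env': 'dev'}, {'Name': 'existing', 'owner': 'me'})
+ ({'owner': 'me'}, ['env'])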
+ """
+
+ tag_key_value_pairs_to_set = {}
+ tag_keys_to_unset = []
+
+ if purge_tags:
+ for key in current_tags_dict.keys():
+ if key in new_tags_dict:
+ continue
+ # Amazon reserves 'aws:*' tags; we should avoid purging them, as
+ # removing them is almost never what people intend.
+ if key.startswith('aws:'):
+ continue
+ tag_keys_to_unset.append(key)
+
+ for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+ if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+ tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+ return tag_key_value_pairs_to_set, tag_keys_to_unset
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
new file mode 100644
index 00000000..dd7d9738
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
@@ -0,0 +1,83 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import string
+import textwrap
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+
+
+def _windows_callback_script(passwd=None):
+ script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+ if passwd is not None:
+ passwd = passwd.replace("'", "''")
+ script_tpl = """\
+ <powershell>
+ $admin = [adsi]('WinNT://./administrator, user')
+ $admin.PSBase.Invoke('SetPassword', '${PASS}')
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
+ </powershell>
+ """
+ else:
+ script_tpl = """\
+ <powershell>
+ $admin = [adsi]('WinNT://./administrator, user')
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
+ </powershell>
+ """
+
+ tpl = string.Template(textwrap.dedent(script_tpl))
+ return tpl.safe_substitute(PASS=passwd, SCRIPT=script_url)
+
+
+def _linux_callback_script(tower_address, template_id, host_config_key):
+ template_id = urlparse.quote(template_id)
+ tower_address = urlparse.quote(tower_address)
+ host_config_key = host_config_key.replace("'", "'\"'\"'")
+
+ script_tpl = """\
+ #!/bin/bash
+ set -x
+
+ retry_attempts=10
+ attempt=0
+ while [[ $attempt -lt $retry_attempts ]]
+ do
+ status_code=$(curl --max-time 10 -v -k -s -i \
+ --data 'host_config_key=${host_config_key}' \
+ 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}')
+ if [[ $status_code == 404 ]]
+ then
+ # fall back to using the v1 API for Tower 3.1 and below, since the v2 API will always 404
+ status_code=$(curl --max-time 10 -v -k -s -i \
+ --data 'host_config_key=${host_config_key}' \
+ 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}')
+ fi
+ if [[ $status_code == 201 ]]
+ then
+ exit 0
+ fi
+ attempt=$(( attempt + 1 ))
+ echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
+ sleep 60
+ done
+ exit 1
+ """
+ tpl = string.Template(textwrap.dedent(script_tpl))
+ return tpl.safe_substitute(tower_address=tower_address,
+ template_id=template_id,
+ host_config_key=host_config_key)
+
+
+def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd):
+ if windows:
+ return to_native(_windows_callback_script(passwd=passwd))
+ return _linux_callback_script(tower_address, job_template_id, host_config_key)
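+
+
+# Illustrative call (all values are placeholders):
+#
+#   user_data = tower_callback_script(
+#       tower_address='tower.example.com',
+#       job_template_id='42',
+#       host_config_key='SECRET_KEY',
+#       windows=False,
+#       passwd=None,
+#   )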
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
new file mode 100644
index 00000000..70d38cd8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
@@ -0,0 +1,140 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ {
+ 'some-aws-id': 'i-01234567'
+ }
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, bool):
+ filter_dict['Values'] = [str(v).lower()]
+ elif isinstance(v, integer_types):
+ filter_dict['Values'] = [str(v)]
+ elif isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def map_complex_type(complex_type, type_map):
+ """
+ Allows elements within a dictionary to be cast to a specific type
+ Example of usage:
+
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ This ensures all mapped keys within the root element are cast to valid integers
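+
+ Basic Usage (illustrative):
+ >>> map_complex_type({'maximum_percent': '200'}, {'maximum_percent': 'int'})
+ {'maximum_percent': 200}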
+ """
+
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ if key in type_map:
+ if isinstance(type_map[key], list):
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key][0])
+ else:
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key])
+ else:
+ new_type[key] = complex_type[key]
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(map_complex_type(
+ complex_type[i],
+ type_map))
+ elif type_map:
+ # cast a scalar value by looking up the named builtin type (e.g. 'int')
+ return globals()['__builtins__'][type_map](complex_type)
+ return new_type
+
+
+def scrub_none_parameters(parameters, descend_into_lists=True):
+ """
+ Iterate over a dictionary removing any keys that have a None value
+
+ Reference: https://github.com/ansible-collections/community.aws/issues/251
+ Credit: https://medium.com/better-programming/how-to-remove-null-none-values-from-a-dictionary-in-python-1bedf1aab5e4
+
+ :param descend_into_lists: whether or not to descend into lists to continue to remove None values
+ :param parameters: parameter dict
+ :return: parameter dict with all keys = None removed
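+
+ Basic Usage (illustrative):
+ >>> scrub_none_parameters({'a': None, 'b': {'c': None, 'd': 1}})
+ {'b': {'d': 1}}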
+ """
+
+ clean_parameters = {}
+
+ for k, v in parameters.items():
+ if isinstance(v, dict):
+ clean_parameters[k] = scrub_none_parameters(v, descend_into_lists=descend_into_lists)
+ elif descend_into_lists and isinstance(v, list):
+ clean_parameters[k] = [scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv for vv in v]
+ elif v is not None:
+ clean_parameters[k] = v
+
+ return clean_parameters
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
new file mode 100644
index 00000000..8011a1be
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
@@ -0,0 +1,238 @@
+# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import hashlib
+import hmac
+import operator
+
+try:
+ from boto3 import session
+except ImportError:
+ pass
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+from .ec2 import HAS_BOTO3
+from .ec2 import get_aws_connection_info
+
+import ansible.module_utils.common.warnings as ansible_warnings
+
+
+def hexdigest(s):
+ """
+ Returns the sha256 hexdigest of a string after encoding.
+ """
+
+ ansible_warnings.deprecate(
+ 'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ return hashlib.sha256(s.encode("utf-8")).hexdigest()
+
+
+def format_querystring(params=None):
+ """
+ Returns properly url-encoded query string from the provided params dict.
+
+ The parameters are sorted by name, as required for canonical requests.
+ """
+
+ ansible_warnings.deprecate(
+ 'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ if not params:
+ return ""
+
+ # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
+ return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
+
+
+# Key derivation functions. See:
+# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
+def sign(key, msg):
+ '''
+ Return digest for key applied to msg
+ '''
+
+ ansible_warnings.deprecate(
+ 'amazon.aws.module_utils.urls.sign is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+
+def get_signature_key(key, dateStamp, regionName, serviceName):
+ '''
+ Returns signature key for AWS resource
+ '''
+
+ ansible_warnings.deprecate(
+ 'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
+ kRegion = sign(kDate, regionName)
+ kService = sign(kRegion, serviceName)
+ kSigning = sign(kService, "aws4_request")
+ return kSigning
+
+
+def get_aws_credentials_object(module):
+ '''
+ Returns a botocore Credentials object (access key, secret key and session token) for a module.
+ '''
+
+ ansible_warnings.deprecate(
+ 'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ if not HAS_BOTO3:
+ module.fail_json("get_aws_credentials_object requires boto3")
+
+ dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
+ s = session.Session(**boto_params)
+
+ return s.get_credentials()
+
+
+# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
+def signed_request(
+ module=None,
+ method="GET", service=None, host=None, uri=None,
+ query=None, body="", headers=None,
+ session_in_header=True, session_in_query=False
+):
+ """Generate a SigV4 request to an AWS resource for a module
+
+ This is used if you wish to authenticate with AWS credentials against a signed endpoint, such as an Elasticsearch domain.
+
+ Returns :class:`HTTPResponse` object.
+
+ Example:
+ result = signed_request(
+ module=module,
+ service="es",
+ host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
+ )
+
+ :kwarg host: endpoint to talk to
+ :kwarg service: AWS id of service (like `ec2` or `es`)
+ :kwarg module: An AnsibleAWSModule to gather connection info from
+
+ :kwarg body: (optional) Payload to send
+ :kwarg method: (optional) HTTP verb to use
+ :kwarg query: (optional) dict of query params to handle
+ :kwarg uri: (optional) Resource path without query parameters
+
+ :kwarg session_in_header: (optional) Add the session token to the headers
+ :kwarg session_in_query: (optional) Add the session token to the query parameters
+
+ :returns: HTTPResponse
+ """
+
+ module.deprecate(
+ 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ if not HAS_BOTO3:
+ module.fail_json("A sigv4 signed_request requires boto3")
+
+ # "Constants"
+
+ t = datetime.datetime.utcnow()
+ amz_date = t.strftime("%Y%m%dT%H%M%SZ")
+ datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
+ algorithm = "AWS4-HMAC-SHA256"
+
+ # AWS stuff
+
+ region, dummy, dummy = get_aws_connection_info(module, boto3=True)
+ credentials = get_aws_credentials_object(module)
+ access_key = credentials.access_key
+ secret_key = credentials.secret_key
+ session_token = credentials.token
+
+ if not access_key:
+ module.fail_json(msg="aws_access_key_id is missing")
+ if not secret_key:
+ module.fail_json(msg="aws_secret_access_key is missing")
+
+ credential_scope = "/".join([datestamp, region, service, "aws4_request"])
+
+ # Argument Defaults
+
+ uri = uri or "/"
+ query_string = format_querystring(query) if query else ""
+
+ headers = headers or dict()
+ query = query or dict()
+
+ headers.update({
+ "host": host,
+ "x-amz-date": amz_date,
+ })
+
+ # Handle adding of session_token if present
+ if session_token:
+ if session_in_header:
+ headers["X-Amz-Security-Token"] = session_token
+ if session_in_query:
+ query["X-Amz-Security-Token"] = session_token
+
+ if method == "GET":
+ body = ""
+
+ # Derived data
+
+ body_hash = hexdigest(body)
+ signed_headers = ";".join(sorted(headers.keys()))
+
+ # Set up the canonical request to generate the auth token
+
+ # Canonical headers must be lowercased and sorted by header name
+ canonical_headers = "\n".join([
+ key.lower().strip() + ":" + value
+ for key, value in sorted(headers.items(), key=lambda item: item[0].lower())
+ ]) + "\n"  # Note additional trailing newline
+
+ canonical_request = "\n".join([
+ method,
+ uri,
+ query_string,
+ canonical_headers,
+ signed_headers,
+ body_hash,
+ ])
+
+ string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(canonical_request)])
+
+ # Sign the canonical request
+
+ signing_key = get_signature_key(secret_key, datestamp, region, service)
+ signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
+
+ # Make auth header with that info
+
+ authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
+ algorithm, access_key, credential_scope, signed_headers, signature
+ )
+
+ # PERFORM THE REQUEST!
+
+ url = "https://" + host + uri
+
+ if query_string != "":
+ url = url + "?" + query_string
+
+ final_headers = {
+ "x-amz-date": amz_date,
+ "Authorization": authorization_header,
+ }
+
+ final_headers.update(headers)
+
+ return open_url(url, method=method, data=body, headers=final_headers)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py
new file mode 100644
index 00000000..8f4ca363
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
+# remove the _version.py file, and replace the following import by
+#
+# from ansible.module_utils.compat.version import LooseVersion
+
+from ._version import LooseVersion # pylint: disable=unused-import
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
new file mode 100644
index 00000000..226dca92
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2017 Will Thames
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Web Application Firewall modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .ec2 import AWSRetry
+from .waiters import get_waiter
+
+
+MATCH_LOOKUP = {
+ 'byte': {
+ 'method': 'byte_match_set',
+ 'conditionset': 'ByteMatchSet',
+ 'conditiontuple': 'ByteMatchTuple',
+ 'type': 'ByteMatch'
+ },
+ 'geo': {
+ 'method': 'geo_match_set',
+ 'conditionset': 'GeoMatchSet',
+ 'conditiontuple': 'GeoMatchConstraint',
+ 'type': 'GeoMatch'
+ },
+ 'ip': {
+ 'method': 'ip_set',
+ 'conditionset': 'IPSet',
+ 'conditiontuple': 'IPSetDescriptor',
+ 'type': 'IPMatch'
+ },
+ 'regex': {
+ 'method': 'regex_match_set',
+ 'conditionset': 'RegexMatchSet',
+ 'conditiontuple': 'RegexMatchTuple',
+ 'type': 'RegexMatch'
+ },
+ 'size': {
+ 'method': 'size_constraint_set',
+ 'conditionset': 'SizeConstraintSet',
+ 'conditiontuple': 'SizeConstraint',
+ 'type': 'SizeConstraint'
+ },
+ 'sql': {
+ 'method': 'sql_injection_match_set',
+ 'conditionset': 'SqlInjectionMatchSet',
+ 'conditiontuple': 'SqlInjectionMatchTuple',
+ 'type': 'SqlInjectionMatch',
+ },
+ 'xss': {
+ 'method': 'xss_match_set',
+ 'conditionset': 'XssMatchSet',
+ 'conditiontuple': 'XssMatchTuple',
+ 'type': 'XssMatch'
+ },
+}
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_rule_with_backoff(client, rule_id):
+ return client.get_rule(RuleId=rule_id)['Rule']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_byte_match_set_with_backoff(client, byte_match_set_id):
+ return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_ip_set_with_backoff(client, ip_set_id):
+ return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
+ return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
+ return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_xss_match_set_with_backoff(client, xss_match_set_id):
+ return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
+
+
+def get_rule(client, module, rule_id):
+ try:
+ rule = get_rule_with_backoff(client, rule_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain waf rule")
+
+ match_sets = {
+ 'ByteMatch': get_byte_match_set_with_backoff,
+ 'IPMatch': get_ip_set_with_backoff,
+ 'SizeConstraint': get_size_constraint_set_with_backoff,
+ 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
+ 'XssMatch': get_xss_match_set_with_backoff
+ }
+ if 'Predicates' in rule:
+ for predicate in rule['Predicates']:
+ if predicate['Type'] in match_sets:
+ predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
+ # DataId is superseded by the Id merged in from the relevant MatchSet
+ del predicate['DataId']
+ return rule
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def get_web_acl_with_backoff(client, web_acl_id):
+ return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+
+
+def get_web_acl(client, module, web_acl_id):
+ try:
+ web_acl = get_web_acl_with_backoff(client, web_acl_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acl")
+
+ if web_acl:
+ try:
+ for rule in web_acl['Rules']:
+ rule.update(get_rule(client, module, rule['RuleId']))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
+ return camel_dict_to_snake_dict(web_acl)
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def list_rules_with_backoff(client):
+ paginator = client.get_paginator('list_rules')
+ return paginator.paginate().build_full_result()['Rules']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def list_regional_rules_with_backoff(client):
+ resp = client.list_rules()
+ rules = []
+ while resp:
+ rules += resp['Rules']
+ resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ return rules
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def list_web_acls_with_backoff(client):
+ paginator = client.get_paginator('list_web_acls')
+ return paginator.paginate().build_full_result()['WebACLs']
+
+
+@AWSRetry.jittered_backoff(delay=5)
+def list_regional_web_acls_with_backoff(client):
+ resp = client.list_web_acls()
+ acls = []
+ while resp:
+ acls += resp['WebACLs']
+ resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ return acls
+
+
+def list_web_acls(client, module):
+ try:
+ if client.__class__.__name__ == 'WAF':
+ return list_web_acls_with_backoff(client)
+ elif client.__class__.__name__ == 'WAFRegional':
+ return list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acls")
+
+
+def get_change_token(client, module):
+ try:
+ token = client.get_change_token()
+ return token['ChangeToken']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain change token")
+
+
+@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException'])
+def run_func_with_change_token_backoff(client, module, params, func, wait=False):
+ params['ChangeToken'] = get_change_token(client, module)
+ result = func(**params)
+ if wait:
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=result['ChangeToken']
+ )
+ return result
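+
+
+# Illustrative call (a sketch; assumes a WAF client, an AnsibleAWSModule named
+# `module`, and placeholder rule parameters):
+#
+#   params = {'Name': 'my-rule', 'MetricName': 'myrule'}
+#   result = run_func_with_change_token_backoff(client, module, params, client.create_rule, wait=True)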
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
new file mode 100644
index 00000000..2abf390c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
@@ -0,0 +1,1265 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "ImageAvailable": {
+ "operation": "DescribeImages",
+ "maxAttempts": 80,
+ "delay": 15,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "Images[].State",
+ "expected": "available"
+ },
+ {
+ "state": "failure",
+ "matcher": "pathAny",
+ "argument": "Images[].State",
+ "expected": "failed"
+ }
+ ]
+ },
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "InternetGatewayAttached": {
+ "operation": "DescribeInternetGateways",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "InternetGateways[].Attachments[].State"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceAttached": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "attached",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceAvailable": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceDeleted": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(NetworkInterfaces[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(NetworkInterfaces[]) == `0`",
+ "state": "success"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "success"
+ },
+ ]
+ },
+ "NetworkInterfaceDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": True,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceNoDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": False,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SnapshotCompleted": {
+ "delay": 15,
+ "operation": "DescribeSnapshots",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "completed",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Snapshots[].State"
+ }
+ ]
+ },
+ "SubnetAvailable": {
+ "delay": 15,
+ "operation": "DescribeSubnets",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Subnets[].State"
+ }
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpcAvailable": {
+ "delay": 15,
+ "operation": "DescribeVpcs",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Vpcs[].State"
+ }
+ ]
+ },
+ "VpcExists": {
+ "operation": "DescribeVpcs",
+ "delay": 1,
+ "maxAttempts": 5,
+ "acceptors": [
+ {
+ "matcher": "status",
+ "expected": 200,
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpcID.NotFound",
+ "state": "retry"
+ }
+ ]
+ },
+ "VpcEndpointExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpcEndpoints",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpcEndpoints[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpcEndpointId.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ "NatGatewayDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeNatGateways",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "expected": "deleted",
+ "argument": "NatGateways[].State"
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "NatGatewayNotFound"
+ }
+ ]
+ },
+ "NatGatewayAvailable": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeNatGateways",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "expected": "available",
+ "argument": "NatGateways[].State"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "NatGatewayNotFound"
+ }
+ ]
+ },
+ }
+}
+
+
+waf_data = {
+ "version": 2,
+ "waiters": {
+ "ChangeTokenInSync": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "GetChangeTokenStatus",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "ChangeTokenStatus == 'INSYNC'",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "WAFInternalErrorException",
+ "state": "retry"
+ }
+ ]
+ }
+ }
+}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "FargateProfileActive": {
+ "delay": 20,
+ "maxAttempts": 30,
+ "operation": "DescribeFargateProfile",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "fargateProfile.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "FargateProfileDeleted": {
+ "delay": 20,
+ "maxAttempts": 30,
+ "operation": "DescribeFargateProfile",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "fargateProfile.status == 'DELETING'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "NodegroupActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeNodegroup",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "nodegroup.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "NodegroupDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeNodegroup",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "nodegroup.status == 'DELETING'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+elb_data = {
+ "version": 2,
+ "waiters": {
+ "AnyInstanceInService": {
+ "acceptors": [
+ {
+ "argument": "InstanceStates[].State",
+ "expected": "InService",
+ "matcher": "pathAny",
+ "state": "success"
+ }
+ ],
+ "delay": 15,
+ "maxAttempts": 40,
+ "operation": "DescribeInstanceHealth"
+ },
+ "InstanceDeregistered": {
+ "delay": 15,
+ "operation": "DescribeInstanceHealth",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "OutOfService",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "InstanceStates[].State"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInstance",
+ "state": "success"
+ }
+ ]
+ },
+ "InstanceInService": {
+ "acceptors": [
+ {
+ "argument": "InstanceStates[].State",
+ "expected": "InService",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInstance",
+ "state": "retry"
+ }
+ ],
+ "delay": 15,
+ "maxAttempts": 40,
+ "operation": "DescribeInstanceHealth"
+ },
+ "LoadBalancerCreated": {
+ "delay": 10,
+ "maxAttempts": 60,
+ "operation": "DescribeLoadBalancers",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(LoadBalancerDescriptions[]) > `0`",
+ "state": "success",
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "retry",
+ },
+ ],
+ },
+ "LoadBalancerDeleted": {
+ "delay": 10,
+ "maxAttempts": 60,
+ "operation": "DescribeLoadBalancers",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(LoadBalancerDescriptions[]) > `0`",
+ "state": "retry",
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "success",
+ },
+ ],
+ },
+ }
+}
+
+elbv2_data = {
+ "version": 2,
+ "waiters": {
+ "LoadBalancerAvailable": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "active"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "provisioning"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancerIpAddressTypeIpv4": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "ipv4"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "dualstack"
+ },
+ {
+ "state": "failure",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancerIpAddressTypeDualStack": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "dualstack"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "ipv4"
+ },
+ {
+ "state": "failure",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancersDeleted": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "active"
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "success"
+ }
+ ]
+ },
+ }
+}
+
+
+rds_data = {
+ "version": 2,
+ "waiters": {
+ "DBInstanceStopped": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].DBInstanceStatus",
+ "expected": "stopped"
+ },
+ ]
+ },
+ "DBClusterAvailable": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBClusters",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBClusters[].Status",
+ "expected": "available"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "DBClusterNotFoundFault"
+ }
+ ]
+ },
+ "DBClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBClusters",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBClusters[].Status",
+ "expected": "stopped"
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "DBClusterNotFoundFault"
+ }
+ ]
+ },
+ "ReadReplicaPromoted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "length(DBInstances[].StatusInfos) == `0`",
+ "expected": True
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "DBInstances[].StatusInfos[].Status",
+ "expected": "replicating"
+ }
+ ]
+ },
+ "RoleAssociated": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].AssociatedRoles[].Status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "DBInstances[].AssociatedRoles[].Status",
+ "expected": "PENDING"
+ }
+ ]
+ },
+ "RoleDisassociated": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].AssociatedRoles[].Status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "DBInstances[].AssociatedRoles[].Status",
+ "expected": "PENDING"
+ },
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "length(DBInstances[].AssociatedRoles[]) == `0`",
+ "expected": True
+ },
+ ]
+ }
+ }
+}
+
+
+route53_data = {
+ "version": 2,
+ "waiters": {
+ "ResourceRecordSetsChanged": {
+ "delay": 30,
+ "maxAttempts": 60,
+ "operation": "GetChange",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": "INSYNC",
+ "argument": "ChangeInfo.Status",
+ "state": "success"
+ }
+ ]
+ }
+ }
+}
+
+
+def _inject_limit_retries(model):
+
+ extra_retries = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling']
+
+ acceptors = []
+ for error in extra_retries:
+ acceptors.append({"state": "success", "matcher": "error", "expected": error})
+
+ _model = copy.deepcopy(model)
+
+ for waiter in model["waiters"]:
+ _model["waiters"][waiter]["acceptors"].extend(acceptors)
+
+ return _model
+
+
+def ec2_model(name):
+ ec2_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(ec2_data))
+ return ec2_models.get_waiter(name)
+
+
+def waf_model(name):
+ waf_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(waf_data))
+ return waf_models.get_waiter(name)
+
+
+def eks_model(name):
+ eks_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(eks_data))
+ return eks_models.get_waiter(name)
+
+
+def elbv2_model(name):
+ elbv2_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(elbv2_data))
+ return elbv2_models.get_waiter(name)
+
+
+def elb_model(name):
+ elb_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(elb_data))
+ return elb_models.get_waiter(name)
+
+
+def rds_model(name):
+ rds_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(rds_data))
+ return rds_models.get_waiter(name)
+
+
+def route53_model(name):
+ route53_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(route53_data))
+ return route53_models.get_waiter(name)
+
+
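+# Illustrative lookup in the table below (assumes a boto3 EC2 client named `ec2`;
+# the get_waiter() helper defined in this file is the usual entry point):
+#
+#   waiter = waiters_by_name[('EC2', 'subnet_exists')](ec2)
+#   waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])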
+waiters_by_name = {
+ ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter(
+ 'image_available',
+ ec2_model('ImageAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_images
+ )),
+ ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_exists',
+ ec2_model('InternetGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_attached',
+ ec2_model('InternetGatewayAttached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_attached',
+ ec2_model('NetworkInterfaceAttached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_deleted',
+ ec2_model('NetworkInterfaceDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_available',
+ ec2_model('NetworkInterfaceAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_delete_on_terminate',
+ ec2_model('NetworkInterfaceDeleteOnTerminate'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_no_delete_on_terminate',
+ ec2_model('NetworkInterfaceNoDeleteOnTerminate'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
+ 'route_table_exists',
+ ec2_model('RouteTableExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_route_tables
+ )),
+ ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
+ 'security_group_exists',
+ ec2_model('SecurityGroupExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_security_groups
+ )),
+ ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter(
+ 'snapshot_completed',
+ ec2_model('SnapshotCompleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_snapshots
+ )),
+ ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter(
+ 'subnet_available',
+ ec2_model('SubnetAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
+ 'subnet_exists',
+ ec2_model('SubnetExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_map_public',
+ ec2_model('SubnetHasMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_map_public',
+ ec2_model('SubnetNoMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_assign_ipv6',
+ ec2_model('SubnetHasAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_assign_ipv6',
+ ec2_model('SubnetNoAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
+ 'subnet_deleted',
+ ec2_model('SubnetDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter(
+ 'vpc_available',
+ ec2_model('VpcAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpcs
+ )),
+ ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpc_exists',
+ ec2_model('VpcExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpcs
+ )),
+ ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpc_endpoint_exists',
+ ec2_model('VpcEndpointExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpc_endpoints
+ )),
+ ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_exists',
+ ec2_model('VpnGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_detached',
+ ec2_model('VpnGatewayDetached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter(
+ 'nat_gateway_deleted',
+ ec2_model('NatGatewayDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_nat_gateways
+ )),
+ ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter(
+ 'nat_gateway_available',
+ ec2_model('NatGatewayAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_nat_gateways
+ )),
+ ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
+ 'cluster_active',
+ eks_model('ClusterActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
+ 'cluster_deleted',
+ eks_model('ClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter(
+ 'fargate_profile_active',
+ eks_model('FargateProfileActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_fargate_profile
+ )),
+ ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter(
+ 'fargate_profile_deleted',
+ eks_model('FargateProfileDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_fargate_profile
+ )),
+ ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter(
+ 'nodegroup_active',
+ eks_model('NodegroupActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_nodegroup
+ )),
+ ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter(
+ 'nodegroup_deleted',
+ eks_model('NodegroupDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_nodegroup
+ )),
+ ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter(
+ 'any_instance_in_service',
+ elb_model('AnyInstanceInService'),
+ core_waiter.NormalizedOperationMethod(
+ elb.describe_instance_health
+ )),
+ ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter(
+ 'instance_deregistered',
+ elb_model('InstanceDeregistered'),
+ core_waiter.NormalizedOperationMethod(
+ elb.describe_instance_health
+ )),
+ ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter(
+ 'instance_in_service',
+ elb_model('InstanceInService'),
+ core_waiter.NormalizedOperationMethod(
+ elb.describe_instance_health
+ )),
+ ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter(
+ 'load_balancer_created',
+ elb_model('LoadBalancerCreated'),
+ core_waiter.NormalizedOperationMethod(
+ elb.describe_load_balancers
+ )),
+ ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter(
+ 'load_balancer_deleted',
+ elb_model('LoadBalancerDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ elb.describe_load_balancers
+ )),
+ ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter(
+ 'load_balancer_available',
+ elbv2_model('LoadBalancerAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ elbv2.describe_load_balancers
+ )),
+ ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter(
+ 'load_balancer_ip_address_type_ipv4',
+ elbv2_model('LoadBalancerIpAddressTypeIpv4'),
+ core_waiter.NormalizedOperationMethod(
+ elbv2.describe_load_balancers
+ )),
+ ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter(
+ 'load_balancer_ip_address_type_dualstack',
+ elbv2_model('LoadBalancerIpAddressTypeDualStack'),
+ core_waiter.NormalizedOperationMethod(
+ elbv2.describe_load_balancers
+ )),
+ ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter(
+ 'load_balancers_deleted',
+ elbv2_model('LoadBalancersDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ elbv2.describe_load_balancers
+ )),
+ ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+ 'db_instance_stopped',
+ rds_model('DBInstanceStopped'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+ ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter(
+ 'cluster_available',
+ rds_model('DBClusterAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_clusters
+ )),
+ ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter(
+ 'cluster_deleted',
+ rds_model('DBClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_clusters
+ )),
+ ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter(
+ 'read_replica_promoted',
+ rds_model('ReadReplicaPromoted'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+ ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter(
+ 'role_associated',
+ rds_model('RoleAssociated'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+ ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter(
+ 'role_disassociated',
+ rds_model('RoleDisassociated'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+ ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter(
+ 'resource_record_sets_changed',
+ route53_model('ResourceRecordSetsChanged'),
+ core_waiter.NormalizedOperationMethod(
+ route53.get_change
+ )),
+}
+
+
+def get_waiter(client, waiter_name):
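+ # Clients wrapped in the retrying wrapper are unwrapped first, so the
+ # lookup below keys off the underlying botocore client class name.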
+ if isinstance(client, _RetryingBotoClientWrapper):
+ return get_waiter(client.client, waiter_name)
+ try:
+ return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+ except KeyError:
+ raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+ waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
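+
+
+# Illustrative usage, as a sketch only (assumes an AnsibleAWSModule instance
+# named "module"; the 'ec2' client, 'subnet_exists' waiter, and subnet_id
+# variable are placeholders):
+#
+#   client = module.client('ec2')
+#   waiter = get_waiter(client, 'subnet_exists')
+#   waiter.wait(SubnetIds=[subnet_id])
+#
+# An unknown (client, waiter_name) pair raises NotImplementedError listing
+# the available waiters.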
diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
new file mode 100644
index 00000000..fcc523d5
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
@@ -0,0 +1,1962 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: autoscaling_group
+version_added: 5.0.0
+short_description: Create or delete AWS AutoScaling Groups (ASGs)
+description:
+ - Can create or delete AWS AutoScaling Groups.
+ - Can be used with the M(community.aws.autoscaling_launch_config) module to manage Launch Configurations.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Gareth Rushgrove (@garethr)"
+options:
+ state:
+ description:
+ - Register or deregister the instance.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ name:
+ description:
+ - Unique name for group to be created or deleted.
+ required: true
+ type: str
+ load_balancers:
+ description:
+ - List of ELB names to use for the group. Use for classic load balancers.
+ type: list
+ elements: str
+ target_group_arns:
+ description:
+ - List of target group ARNs to use for the group. Use for application load balancers.
+ type: list
+ elements: str
+ availability_zones:
+ description:
+ - List of availability zone names in which to create the group.
+ - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set.
+ type: list
+ elements: str
+ launch_config_name:
+ description:
+ - Name of the Launch configuration to use for the group. See the M(community.aws.autoscaling_launch_config) module for managing these.
+ - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided.
+ type: str
+ launch_template:
+ description:
+ - Dictionary describing the Launch Template to use.
+ suboptions:
+ version:
+ description:
+ - The version number of the launch template to use.
+ - Defaults to latest version if not provided.
+ type: str
+ launch_template_name:
+ description:
+ - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+ type: str
+ launch_template_id:
+ description:
+ - The id of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+ type: str
+ type: dict
+ min_size:
+ description:
+ - Minimum number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ max_size:
+ description:
+ - Maximum number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ max_instance_lifetime:
+ description:
+ - The maximum amount of time, in seconds, that an instance can be in service.
+ - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
+ - Value of 0 removes lifetime restriction.
+ type: int
+ mixed_instances_policy:
+ description:
+ - A mixed instance policy to use for the ASG.
+ - Only used when the ASG is configured to use a Launch Template (I(launch_template)).
+ - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)'
+ required: false
+ suboptions:
+ instance_types:
+ description:
+ - A list of instance_types.
+ type: list
+ elements: str
+ required: false
+ instances_distribution:
+ description:
+ - >-
+ Specifies the distribution of On-Demand Instances and Spot Instances, the maximum price
+ to pay for Spot Instances, and how the Auto Scaling group allocates instance types
+ to fulfill On-Demand and Spot capacity.
+ - 'See also U(https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_InstancesDistribution.html)'
+ required: false
+ type: dict
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ suboptions:
+ on_demand_allocation_strategy:
+ description:
+ - Indicates how to allocate instance types to fulfill On-Demand capacity.
+ type: str
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ on_demand_base_capacity:
+ description:
+ - >-
+ The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand
+ Instances. This base portion is provisioned first as your group scales.
+ - >-
+ Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a
+ percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.
+ type: int
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ on_demand_percentage_above_base_capacity:
+ description:
+ - Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
+ - Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.
+ - 'Valid range: 0 to 100'
+ type: int
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ spot_allocation_strategy:
+ description:
+ - Indicates how to allocate instances across Spot Instance pools.
+ type: str
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ spot_instance_pools:
+ description:
+ - >-
+ The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from
+ the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2.
+ - Used only when the Spot allocation strategy is lowest-price.
+ - 'Valid Range: Minimum value of 1. Maximum value of 20.'
+ type: int
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ spot_max_price:
+ description:
+ - The maximum price per unit hour that you are willing to pay for a Spot Instance.
+ - If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.
+ - To remove a value that you previously set, include the parameter but leave the value blank.
+ type: str
+ required: false
+ version_added: 1.5.0
+ version_added_collection: community.aws
+ type: dict
+ placement_group:
+ description:
+ - Physical location of your cluster placement group created in Amazon EC2.
+ type: str
+ desired_capacity:
+ description:
+ - Desired number of instances in group, if unspecified then the current group value will be used.
+ type: int
+ replace_all_instances:
+ description:
+ - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration.
+ It increases the ASG size by I(replace_batch_size), waits for the new instances to be up and running.
+ After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced.
+ Once that's done the ASG size is reduced back to the expected size.
+ default: false
+ type: bool
+ replace_batch_size:
+ description:
+ - Number of instances you'd like to replace at a time. Used with I(replace_all_instances).
+ required: false
+ default: 1
+ type: int
+ replace_instances:
+ description:
+ - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances
+ matching the current launch configuration.
+ type: list
+ elements: str
+ detach_instances:
+ description:
+ - Removes one or more instances from the specified AutoScalingGroup.
+ - If I(decrement_desired_capacity) flag is not set, new instance(s) are launched to replace the detached instance(s).
+ - If a Classic Load Balancer is attached to the AutoScalingGroup, the instances are also deregistered from the load balancer.
+ - If there are target groups attached to the AutoScalingGroup, the instances are also deregistered from the target groups.
+ type: list
+ elements: str
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ decrement_desired_capacity:
+ description:
+ - Indicates whether the AutoScalingGroup decrements the desired capacity value by the number of instances detached.
+ default: false
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ lc_check:
+ description:
+ - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config_name).
+ default: true
+ type: bool
+ lt_check:
+ description:
+ - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
+ I(launch_template) or I(launch_template) I(version).
+ default: true
+ type: bool
+ vpc_zone_identifier:
+ description:
+ - List of VPC subnets to use.
+ type: list
+ elements: str
+ tags:
+ description:
+ - A list of tags to add to the Auto Scaling Group.
+ - Optional key is I(propagate_at_launch), which defaults to true.
+ - When I(propagate_at_launch) is true the tags will be propagated to the Instances created.
+ type: list
+ elements: dict
+ purge_tags:
+ description:
+ - If C(true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
+ - If the I(tags) parameter is not set then tags will not be modified.
+ default: false
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ health_check_period:
+ description:
+ - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ required: false
+ default: 300
+ type: int
+ health_check_type:
+ description:
+ - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
+ required: false
+ default: EC2
+ choices: ['EC2', 'ELB']
+ type: str
+ default_cooldown:
+ description:
+ - The number of seconds after a scaling activity completes before another can begin.
+ default: 300
+ type: int
+ wait_timeout:
+ description:
+ - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
+ try increasing this value.
+ default: 300
+ type: int
+ wait_for_instances:
+ description:
+ - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
+ instances have a lifecycle_state of "InService" and a health_status of "Healthy".
+ default: true
+ type: bool
+ termination_policies:
+ description:
+ - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
+ - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained
+ instead of changed to C(Default).
+ - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)'
+ - 'Full documentation of valid values can be found in the AWS documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)'
+ default: Default
+ type: list
+ elements: str
+ notification_topic:
+ description:
+ - A SNS topic ARN to send auto scaling notifications to.
+ type: str
+ notification_types:
+ description:
+ - A list of auto scaling events to trigger notifications on.
+ default:
+ - 'autoscaling:EC2_INSTANCE_LAUNCH'
+ - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
+ required: false
+ type: list
+ elements: str
+ suspend_processes:
+ description:
+ - A list of scaling processes to suspend.
+ - 'Valid values include:'
+ - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer)
+ - 'Full documentation of valid values can be found in the AWS documentation:'
+ - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)'
+ default: []
+ type: list
+ elements: str
+ metrics_collection:
+ description:
+ - Enable ASG metrics collection.
+ type: bool
+ default: false
+ metrics_granularity:
+ description:
+ - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch.
+ default: "1Minute"
+ type: str
+ metrics_list:
+ description:
+ - List of autoscaling metrics to collect when I(metrics_collection=true).
+ default:
+ - 'GroupMinSize'
+ - 'GroupMaxSize'
+ - 'GroupDesiredCapacity'
+ - 'GroupInServiceInstances'
+ - 'GroupPendingInstances'
+ - 'GroupStandbyInstances'
+ - 'GroupTerminatingInstances'
+ - 'GroupTotalInstances'
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Basic configuration with Launch Configuration
+
+- amazon.aws.autoscaling_group:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_config_name: 'lc-1'
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: false
+
+# Rolling ASG Updates
+
+# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
+#
+# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
+# a rolling fashion with instances using the current launch configuration, "my_new_lc".
+#
+# This could also be considered a rolling deploy of a pre-baked AMI.
+#
+# If this is a newly created group, the instances will not be replaced since all instances
+# will have the current launch configuration.
+
+- name: create launch config
+ community.aws.autoscaling_launch_config:
+ name: my_new_lc
+ image_id: ami-lkajsf
+ key_name: mykey
+ region: us-east-1
+ security_groups: sg-23423
+ instance_type: m1.small
+ assign_public_ip: true
+
+- amazon.aws.autoscaling_group:
+ name: myasg
+ launch_config_name: my_new_lc
+ health_check_period: 60
+ health_check_type: ELB
+ replace_all_instances: true
+ min_size: 5
+ max_size: 5
+ desired_capacity: 5
+ region: us-east-1
+
+# To only replace a couple of instances instead of all of them, supply a list
+# to "replace_instances":
+
+- amazon.aws.autoscaling_group:
+ name: myasg
+ launch_config_name: my_new_lc
+ health_check_period: 60
+ health_check_type: ELB
+ replace_instances:
+ - i-b345231
+ - i-24c2931
+ min_size: 5
+ max_size: 5
+ desired_capacity: 5
+ region: us-east-1
+
+# Basic Configuration with Launch Template
+
+- amazon.aws.autoscaling_group:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_template:
+ version: '1'
+ launch_template_name: 'lt-example'
+ launch_template_id: 'lt-123456'
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: false
+
+# Basic Configuration with Launch Template using mixed instance policy
+
+- amazon.aws.autoscaling_group:
+ name: special
+ load_balancers: [ 'lb1', 'lb2' ]
+ availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
+ launch_template:
+ version: '1'
+ launch_template_name: 'lt-example'
+ launch_template_id: 'lt-123456'
+ mixed_instances_policy:
+ instance_types:
+ - t3a.large
+ - t3.large
+ - t2.large
+ instances_distribution:
+ on_demand_percentage_above_base_capacity: 0
+ spot_allocation_strategy: capacity-optimized
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
+ tags:
+ - environment: production
+ propagate_at_launch: false
+'''
+
+RETURN = r'''
+---
+auto_scaling_group_name:
+ description: The unique name of the auto scaling group
+ returned: success
+ type: str
+ sample: "myasg"
+auto_scaling_group_arn:
+ description: The unique ARN of the autoscaling group
+ returned: success
+ type: str
+ sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg"
+availability_zones:
+ description: The availability zones for the auto scaling group
+ returned: success
+ type: list
+ sample: [
+ "us-east-1d"
+ ]
+created_time:
+ description: Timestamp of create time of the auto scaling group
+ returned: success
+ type: str
+ sample: "2017-11-08T14:41:48.272000+00:00"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+healthcheck_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+healthcheck_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+healthy_instances:
+ description: Number of instances in a healthy state
+ returned: success
+ type: int
+ sample: 5
+in_service_instances:
+ description: Number of instances in service
+ returned: success
+ type: int
+ sample: 3
+instance_facts:
+ description: Dictionary of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: dict
+ sample: {
+ "i-0123456789012": {
+ "health_status": "Healthy",
+ "launch_config_name": "public-webapp-production-1",
+ "lifecycle_state": "InService"
+ }
+ }
+instances:
+ description: list of instance IDs in the ASG
+ returned: success
+ type: list
+ sample: [
+ "i-0123456789012"
+ ]
+launch_config_name:
+ description: >
+ Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+ provided for compatibility with M(amazon.aws.autoscaling_group) module.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancers:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_instance_lifetime:
+ description: The maximum amount of time, in seconds, that an instance can be in service.
+ returned: success
+ type: int
+ sample: 604800
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+mixed_instances_policy:
+ description: Returns the list of instance types if a mixed instances policy is set.
+ returned: success
+ type: list
+ sample: ["t3.micro", "t3a.micro"]
+mixed_instances_policy_full:
+ description: Returns the full dictionary representation of the mixed instances policy if a mixed instances policy is set.
+ returned: success
+ type: dict
+ sample: {
+ "instances_distribution": {
+ "on_demand_allocation_strategy": "prioritized",
+ "on_demand_base_capacity": 0,
+ "on_demand_percentage_above_base_capacity": 0,
+ "spot_allocation_strategy": "capacity-optimized"
+ },
+ "launch_template": {
+ "launch_template_specification": {
+ "launch_template_id": "lt-53c2425cffa544c23",
+ "launch_template_name": "random-LaunchTemplate",
+ "version": "2"
+ },
+ "overrides": [
+ {
+ "instance_type": "m5.xlarge"
+ },
+ {
+ "instance_type": "m5a.xlarge"
+ },
+ ]
+ }
+ }
+pending_instances:
+ description: Number of instances in pending state
+ returned: success
+ type: int
+ sample: 1
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+target_group_arns:
+ description: List of ARNs of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+ ]
+target_group_names:
+ description: List of names of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "target-group-host-hello",
+ "target-group-path-world"
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+ type: list
+ sample: ["Default"]
+unhealthy_instances:
+ description: Number of instances in an unhealthy state
+ returned: success
+ type: int
+ sample: 0
+viable_instances:
+ description: Number of instances in a viable state
+ returned: success
+ type: int
+ sample: 1
+vpc_zone_identifier:
+ description: VPC zone ID / subnet id for the auto scaling group
+ returned: success
+ type: str
+ sample: "subnet-a31ef45f"
+metrics_collection:
+ description: List of enabled AutoScalingGroup metrics.
+ returned: success
+ type: list
+ sample: [
+ {
+ "Granularity": "1Minute",
+ "Metric": "GroupInServiceInstances"
+ }
+ ]
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
+ 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
+ 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
+ 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
+ 'VPCZoneIdentifier')
+
+INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
+
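+# Shared retry settings for the AWSRetry.jittered_backoff decorators below:
+# up to 10 retries, starting from a 3 second delay and backing off by a
+# factor of 1.5 (with jitter) between attempts.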
+backoff_params = dict(retries=10, delay=3, backoff=1.5)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_autoscaling_groups(connection, group_name):
+ pg = connection.get_paginator('describe_auto_scaling_groups')
+ return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def deregister_lb_instances(connection, lb_name, instance_id):
+ connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_instance_health(connection, lb_name, instances):
+ params = dict(LoadBalancerName=lb_name)
+ if instances:
+ params.update(Instances=instances)
+ return connection.describe_instance_health(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_target_health(connection, target_group_arn, instances):
+ return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def suspend_asg_processes(connection, asg_name, processes):
+ connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def resume_asg_processes(connection, asg_name, processes):
+ connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_launch_configurations(connection, launch_config_name):
+ pg = connection.get_paginator('describe_launch_configurations')
+ return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_launch_templates(connection, launch_template):
+ if launch_template['launch_template_id'] is not None:
+ try:
+ lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']])
+ return lt
+ except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'):
+ module.fail_json(msg="No launch template found matching: %s" % launch_template)
+ else:
+ try:
+ lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']])
+ return lt
+ except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'):
+ module.fail_json(msg="No launch template found matching: %s" % launch_template)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def create_asg(connection, **params):
+ connection.create_auto_scaling_group(**params)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def put_notification_config(connection, asg_name, topic_arn, notification_types):
+ connection.put_notification_configuration(
+ AutoScalingGroupName=asg_name,
+ TopicARN=topic_arn,
+ NotificationTypes=notification_types
+ )
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def del_notification_config(connection, asg_name, topic_arn):
+ connection.delete_notification_configuration(
+ AutoScalingGroupName=asg_name,
+ TopicARN=topic_arn
+ )
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def attach_load_balancers(connection, asg_name, load_balancers):
+ connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def detach_load_balancers(connection, asg_name, load_balancers):
+ connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def attach_lb_target_groups(connection, asg_name, target_group_arns):
+ connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def detach_lb_target_groups(connection, asg_name, target_group_arns):
+ connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def update_asg(connection, **params):
+ connection.update_auto_scaling_group(**params)
+
+
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params)
+def delete_asg(connection, asg_name, force_delete):
+ connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def terminate_asg_instance(connection, instance_id, decrement_capacity):
+ connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
+ ShouldDecrementDesiredCapacity=decrement_capacity)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity):
+ connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name,
+ ShouldDecrementDesiredCapacity=decrement_capacity)
+
+
+def enforce_required_arguments_for_create():
+ ''' As many arguments are not required for autoscale group deletion
+ they cannot be mandatory arguments for the module, so we enforce
+ them here '''
+ missing_args = []
+ if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None:
+ module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create")
+ for arg in ('min_size', 'max_size'):
+ if module.params[arg] is None:
+ missing_args.append(arg)
+ if missing_args:
+ module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args))
+
+
+def get_properties(autoscaling_group):
+ properties = dict(
+ healthy_instances=0,
+ in_service_instances=0,
+ unhealthy_instances=0,
+ pending_instances=0,
+ viable_instances=0,
+ terminating_instances=0
+ )
+ instance_facts = dict()
+ autoscaling_group_instances = autoscaling_group.get('Instances')
+
+ if autoscaling_group_instances:
+ properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
+ for i in autoscaling_group_instances:
+ instance_facts[i['InstanceId']] = {
+ 'health_status': i['HealthStatus'],
+ 'lifecycle_state': i['LifecycleState']
+ }
+ if 'LaunchConfigurationName' in i:
+ instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
+ elif 'LaunchTemplate' in i:
+ instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
+
+ if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
+ properties['viable_instances'] += 1
+
+ if i['HealthStatus'] == 'Healthy':
+ properties['healthy_instances'] += 1
+ else:
+ properties['unhealthy_instances'] += 1
+
+ if i['LifecycleState'] == 'InService':
+ properties['in_service_instances'] += 1
+ if i['LifecycleState'] == 'Terminating':
+ properties['terminating_instances'] += 1
+ if i['LifecycleState'] == 'Pending':
+ properties['pending_instances'] += 1
+ else:
+ properties['instances'] = []
+
+ properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
+ properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
+ properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
+ properties['created_time'] = autoscaling_group.get('CreatedTime')
+ properties['instance_facts'] = instance_facts
+ properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
+ if 'LaunchConfigurationName' in autoscaling_group:
+ properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
+ else:
+ properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
+ properties['tags'] = autoscaling_group.get('Tags')
+ properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
+ properties['min_size'] = autoscaling_group.get('MinSize')
+ properties['max_size'] = autoscaling_group.get('MaxSize')
+ properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
+ properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
+ properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
+ properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
+ properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
+ properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
+ properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
+ raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy')
+ if raw_mixed_instance_object:
+ properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object)
+ properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')]
+
+ metrics = autoscaling_group.get('EnabledMetrics')
+ if metrics:
+ metrics.sort(key=lambda x: x["Metric"])
+ properties['metrics_collection'] = metrics
+
+ if properties['target_group_arns']:
+ elbv2_connection = module.client('elbv2')
+ tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
+ tg_result = tg_paginator.paginate(
+ TargetGroupArns=properties['target_group_arns']
+ ).build_full_result()
+ target_groups = tg_result['TargetGroups']
+ else:
+ target_groups = []
+
+ properties['target_group_names'] = [
+ tg['TargetGroupName']
+ for tg in target_groups
+ ]
+
+ return properties
+
+
+def get_launch_object(connection, ec2_connection):
+ launch_object = dict()
+ launch_config_name = module.params.get('launch_config_name')
+ launch_template = module.params.get('launch_template')
+ mixed_instances_policy = module.params.get('mixed_instances_policy')
+ if launch_config_name is None and launch_template is None:
+ return launch_object
+ elif launch_config_name:
+ try:
+ launch_configs = describe_launch_configurations(connection, launch_config_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe launch configurations")
+ if len(launch_configs['LaunchConfigurations']) == 0:
+ module.fail_json(msg="No launch config found with name %s" % launch_config_name)
+ launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']}
+ return launch_object
+ elif launch_template:
+ lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0]
+ if launch_template['version'] is not None:
+ launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}}
+ else:
+ launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}}
+
+ if mixed_instances_policy:
+ instance_types = mixed_instances_policy.get('instance_types', [])
+ instances_distribution = mixed_instances_policy.get('instances_distribution', {})
+ policy = {
+ 'LaunchTemplate': {
+ 'LaunchTemplateSpecification': launch_object['LaunchTemplate']
+ }
+ }
+ if instance_types:
+ policy['LaunchTemplate']['Overrides'] = []
+ for instance_type in instance_types:
+ instance_type_dict = {'InstanceType': instance_type}
+ policy['LaunchTemplate']['Overrides'].append(instance_type_dict)
+ if instances_distribution:
+ instances_distribution_params = scrub_none_parameters(instances_distribution)
+ policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True)
+ launch_object['MixedInstancesPolicy'] = policy
+ return launch_object
+
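+# For reference, get_launch_object() returns one of three shapes (a sketch;
+# the IDs and names below are illustrative only):
+#
+#   {} # neither option supplied
+#   {'LaunchConfigurationName': 'my-lc'} # launch config
+#   {'LaunchTemplate': {'LaunchTemplateId': 'lt-123456', 'Version': '1'}} # launch template
+#
+# with an additional 'MixedInstancesPolicy' key when mixed_instances_policy
+# is supplied alongside a launch template.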
+
+def elb_dreg(asg_connection, group_name, instance_id):
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ wait_timeout = module.params.get('wait_timeout')
+ count = 1
+ if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
+ elb_connection = module.client('elb')
+ else:
+ return
+
+ for lb in as_group['LoadBalancerNames']:
+ deregister_lb_instances(elb_connection, lb, instance_id)
+ module.debug("De-registering %s from ELB %s" % (instance_id, lb))
+
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and count > 0:
+ count = 0
+ for lb in as_group['LoadBalancerNames']:
+ lb_instances = describe_instance_health(elb_connection, lb, [])
+ for i in lb_instances['InstanceStates']:
+ if i['InstanceId'] == instance_id and i['State'] == "InService":
+ count += 1
+ module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
+
+
+def elb_healthy(asg_connection, elb_connection, group_name):
+ healthy_instances = set()
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ props = get_properties(as_group)
+ # get healthy, inservice instances from ASG
+ instances = []
+ for instance, settings in props['instance_facts'].items():
+ if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+ instances.append(dict(InstanceId=instance))
+ module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+ module.debug("ELB instance status:")
+ lb_instances = list()
+ for lb in as_group.get('LoadBalancerNames'):
+ # we catch a race condition that sometimes happens if the instance exists in the ASG
+ # but has not yet shown up in the ELB
+ try:
+ lb_instances = describe_instance_health(elb_connection, lb, instances)
+ except is_boto3_error_code('InvalidInstance'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get load balancer.")
+
+ for i in lb_instances.get('InstanceStates'):
+ if i['State'] == "InService":
+ healthy_instances.add(i['InstanceId'])
+ module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
+ return len(healthy_instances)
+
+
+def tg_healthy(asg_connection, elbv2_connection, group_name):
+ healthy_instances = set()
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+ props = get_properties(as_group)
+ # get healthy, inservice instances from ASG
+ instances = []
+ for instance, settings in props['instance_facts'].items():
+ if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+ instances.append(dict(Id=instance))
+ module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+ module.debug("Target Group instance status:")
+ tg_instances = list()
+ for tg in as_group.get('TargetGroupARNs'):
+ # we catch a race condition that sometimes happens if the instance exists in the ASG
+ # but has not yet shown up in the target group
+ try:
+ tg_instances = describe_target_health(elbv2_connection, tg, instances)
+ except is_boto3_error_code('InvalidInstance'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get target group.")
+
+ for i in tg_instances.get('TargetHealthDescriptions'):
+ if i['TargetHealth']['State'] == "healthy":
+ healthy_instances.add(i['Target']['Id'])
+ module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
+ return len(healthy_instances)
+
+
+def wait_for_elb(asg_connection, group_name):
+ wait_timeout = module.params.get('wait_timeout')
+
+ # if the health_check_type is ELB, we want to query the ELBs directly for instance
+ # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+ if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
+ module.debug("Waiting for ELB to consider instances healthy.")
+ elb_connection = module.client('elb')
+
+ wait_timeout = time.time() + wait_timeout
+ healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+
+ while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
+ healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+ module.debug("ELB thinks %s instances are healthy." % healthy_instances)
+ time.sleep(10)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+ module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
+
+
+def wait_for_target_group(asg_connection, group_name):
+ wait_timeout = module.params.get('wait_timeout')
+
+ # if the health_check_type is ELB, we want to query the ELBs directly for instance
+ # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
+ as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+ if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
+ module.debug("Waiting for Target Group to consider instances healthy.")
+ elbv2_connection = module.client('elbv2')
+
+ wait_timeout = time.time() + wait_timeout
+ healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+
+ while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
+ healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+ module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
+ time.sleep(10)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+ module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
+
+
+def suspend_processes(ec2_connection, as_group):
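+ # Reconcile the requested suspend_processes set with what is currently
+ # suspended on the group: resume anything no longer wanted, suspend the
+ # requested set, and return True when a change was made.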
+ suspend_processes = set(module.params.get('suspend_processes'))
+
+ try:
+ suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
+ except AttributeError:
+ # New ASG being created, no suspended_processes defined yet
+ suspended_processes = set()
+
+ if suspend_processes == suspended_processes:
+ return False
+
+ resume_processes = list(suspended_processes - suspend_processes)
+ if resume_processes:
+ resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
+
+ if suspend_processes:
+ suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))
+
+ return True
+
+
+def create_autoscaling_group(connection):
+ group_name = module.params.get('name')
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ availability_zones = module.params['availability_zones']
+ launch_config_name = module.params.get('launch_config_name')
+ launch_template = module.params.get('launch_template')
+ mixed_instances_policy = module.params.get('mixed_instances_policy')
+ min_size = module.params['min_size']
+ max_size = module.params['max_size']
+ max_instance_lifetime = module.params.get('max_instance_lifetime')
+ placement_group = module.params.get('placement_group')
+ desired_capacity = module.params.get('desired_capacity')
+ vpc_zone_identifier = module.params.get('vpc_zone_identifier')
+ set_tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ health_check_period = module.params.get('health_check_period')
+ health_check_type = module.params.get('health_check_type')
+ default_cooldown = module.params.get('default_cooldown')
+ wait_for_instances = module.params.get('wait_for_instances')
+ wait_timeout = module.params.get('wait_timeout')
+ termination_policies = module.params.get('termination_policies')
+ notification_topic = module.params.get('notification_topic')
+ notification_types = module.params.get('notification_types')
+ metrics_collection = module.params.get('metrics_collection')
+ metrics_granularity = module.params.get('metrics_granularity')
+ metrics_list = module.params.get('metrics_list')
+
+ try:
+ as_groups = describe_autoscaling_groups(connection, group_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe auto scaling groups.")
+
+ ec2_connection = module.client('ec2')
+
+ if vpc_zone_identifier:
+ vpc_zone_identifier = ','.join(vpc_zone_identifier)
+
+ asg_tags = []
+ for tag in set_tags:
+ for k, v in tag.items():
+ if k != 'propagate_at_launch':
+ asg_tags.append(dict(Key=k,
+ Value=to_native(v),
+ PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
+ ResourceType='auto-scaling-group',
+ ResourceId=group_name))
+ if not as_groups:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.")
+
+ if not vpc_zone_identifier and not availability_zones:
+ availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
+ zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
+
+ enforce_required_arguments_for_create()
+
+ if desired_capacity is None:
+ desired_capacity = min_size
+ ag = dict(
+ AutoScalingGroupName=group_name,
+ MinSize=min_size,
+ MaxSize=max_size,
+ DesiredCapacity=desired_capacity,
+ Tags=asg_tags,
+ HealthCheckGracePeriod=health_check_period,
+ HealthCheckType=health_check_type,
+ DefaultCooldown=default_cooldown,
+ TerminationPolicies=termination_policies)
+ if vpc_zone_identifier:
+ ag['VPCZoneIdentifier'] = vpc_zone_identifier
+ if availability_zones:
+ ag['AvailabilityZones'] = availability_zones
+ if placement_group:
+ ag['PlacementGroup'] = placement_group
+ if load_balancers:
+ ag['LoadBalancerNames'] = load_balancers
+ if target_group_arns:
+ ag['TargetGroupARNs'] = target_group_arns
+ if max_instance_lifetime:
+ ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+ launch_object = get_launch_object(connection, ec2_connection)
+ if 'LaunchConfigurationName' in launch_object:
+ ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+ elif 'LaunchTemplate' in launch_object:
+ if 'MixedInstancesPolicy' in launch_object:
+ ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+ else:
+ ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+ else:
+ module.fail_json_aws(e, msg="Missing LaunchConfigurationName or LaunchTemplate")
+
+ try:
+ create_asg(connection, **ag)
+ if metrics_collection:
+ connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+
+ all_ag = describe_autoscaling_groups(connection, group_name)
+ if len(all_ag) == 0:
+ module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
+ as_group = all_ag[0]
+ suspend_processes(connection, as_group)
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+ if load_balancers:
+ wait_for_elb(connection, group_name)
+ # Wait for target group health if target group(s) defined
+ if target_group_arns:
+ wait_for_target_group(connection, group_name)
+ if notification_topic:
+ put_notification_config(connection, group_name, notification_topic, notification_types)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ changed = True
+ return changed, asg_properties
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create Autoscaling Group.")
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have modified AutoScalingGroup if required if not in check_mode.")
+
+ as_group = as_groups[0]
+ initial_asg_properties = get_properties(as_group)
+ changed = False
+
+ if suspend_processes(connection, as_group):
+ changed = True
+
+ # process tag changes
+ have_tags = as_group.get('Tags')
+ want_tags = asg_tags
+ if purge_tags and not want_tags and have_tags:
+ connection.delete_tags(Tags=list(have_tags))
+
+ if len(set_tags) > 0:
+ if have_tags:
+ have_tags.sort(key=lambda x: x["Key"])
+ if want_tags:
+ want_tags.sort(key=lambda x: x["Key"])
+ dead_tags = []
+ have_tag_keyvals = [x['Key'] for x in have_tags]
+ want_tag_keyvals = [x['Key'] for x in want_tags]
+
+ for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
+ changed = True
+ if purge_tags:
+ dead_tags.append(dict(
+ ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag))
+ have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
+
+ if dead_tags:
+ connection.delete_tags(Tags=dead_tags)
+
+ zipped = zip(have_tags, want_tags)
+ if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
+ changed = True
+ connection.create_or_update_tags(Tags=asg_tags)
+
+ # Handle load balancer attachments/detachments
+ # Attach load balancers if they are specified but none currently exist
+ if load_balancers and not as_group['LoadBalancerNames']:
+ changed = True
+ try:
+ attach_load_balancers(connection, group_name, load_balancers)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update Autoscaling Group.")
+
+ # Update load balancers if they are specified and one or more already exists
+ elif as_group['LoadBalancerNames']:
+ change_load_balancers = load_balancers is not None
+ # Get differences
+ if not load_balancers:
+ load_balancers = list()
+ wanted_elbs = set(load_balancers)
+
+ has_elbs = set(as_group['LoadBalancerNames'])
+ # check if all requested are already existing
+ if has_elbs - wanted_elbs and change_load_balancers:
+ # if wanted contains fewer than existing, we need to detach some
+ elbs_to_detach = has_elbs.difference(wanted_elbs)
+ if elbs_to_detach:
+ changed = True
+ try:
+ detach_load_balancers(connection, group_name, list(elbs_to_detach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach))
+ if wanted_elbs - has_elbs:
+ # if existing contains fewer than wanted, we need to attach some
+ elbs_to_attach = wanted_elbs.difference(has_elbs)
+ if elbs_to_attach:
+ changed = True
+ try:
+ attach_load_balancers(connection, group_name, list(elbs_to_attach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach))
+
+ # Handle target group attachments/detachments
+ # Attach target groups if they are specified but none currently exist
+ if target_group_arns and not as_group['TargetGroupARNs']:
+ changed = True
+ try:
+ attach_lb_target_groups(connection, group_name, target_group_arns)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update Autoscaling Group.")
+ # Update target groups if they are specified and one or more already exists
+ elif target_group_arns is not None and as_group['TargetGroupARNs']:
+ # Get differences
+ wanted_tgs = set(target_group_arns)
+ has_tgs = set(as_group['TargetGroupARNs'])
+
+ tgs_to_detach = has_tgs.difference(wanted_tgs)
+ if tgs_to_detach:
+ changed = True
+ try:
+ detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach))
+
+ tgs_to_attach = wanted_tgs.difference(has_tgs)
+ if tgs_to_attach:
+ changed = True
+ try:
+ attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach))
+
+ # check for attributes that aren't required for updating an existing ASG
+ # check if min_size/max_size/desired capacity have been specified and if not use ASG values
+ if min_size is None:
+ min_size = as_group['MinSize']
+ if max_size is None:
+ max_size = as_group['MaxSize']
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+ ag = dict(
+ AutoScalingGroupName=group_name,
+ MinSize=min_size,
+ MaxSize=max_size,
+ DesiredCapacity=desired_capacity,
+ HealthCheckGracePeriod=health_check_period,
+ HealthCheckType=health_check_type,
+ DefaultCooldown=default_cooldown,
+ TerminationPolicies=termination_policies)
+
+ # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
+ launch_object = get_launch_object(connection, ec2_connection)
+ if 'LaunchConfigurationName' in launch_object:
+ ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+ elif 'LaunchTemplate' in launch_object:
+ if 'MixedInstancesPolicy' in launch_object:
+ ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+ else:
+ ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+ else:
+ try:
+ ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
+ except Exception:
+ launch_template = as_group['LaunchTemplate']
+ # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
+ ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
+
+ if availability_zones:
+ ag['AvailabilityZones'] = availability_zones
+ if vpc_zone_identifier:
+ ag['VPCZoneIdentifier'] = vpc_zone_identifier
+ if max_instance_lifetime is not None:
+ ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+ try:
+ update_asg(connection, **ag)
+
+ if metrics_collection:
+ connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+ else:
+ connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update autoscaling group")
+
+ if notification_topic:
+ try:
+ put_notification_config(connection, group_name, notification_topic, notification_types)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.")
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+ # Wait for ELB health if ELB(s) defined
+ if load_balancers:
+ module.debug('\tWAITING FOR ELB HEALTH')
+ wait_for_elb(connection, group_name)
+ # Wait for target group health if target group(s) defined
+ if target_group_arns:
+ module.debug('\tWAITING FOR TG HEALTH')
+ wait_for_target_group(connection, group_name)
+
+ try:
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ if asg_properties != initial_asg_properties:
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to read existing Autoscaling Groups.")
+ return changed, asg_properties
+
+
+def delete_autoscaling_group(connection):
+ group_name = module.params.get('name')
+ notification_topic = module.params.get('notification_topic')
+ wait_for_instances = module.params.get('wait_for_instances')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if notification_topic:
+ del_notification_config(connection, group_name, notification_topic)
+ groups = describe_autoscaling_groups(connection, group_name)
+ if groups:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup if not in check_mode.")
+ wait_timeout = time.time() + wait_timeout
+ if not wait_for_instances:
+ delete_asg(connection, group_name, force_delete=True)
+ else:
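+ # Scale the group down to zero first so instances terminate gracefully,
+ # then delete the (by then empty) ASG without forcing.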
+ updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
+ update_asg(connection, **updated_params)
+ instances = True
+ while instances and wait_for_instances and wait_timeout >= time.time():
+ tmp_groups = describe_autoscaling_groups(connection, group_name)
+ if tmp_groups:
+ tmp_group = tmp_groups[0]
+ if not tmp_group.get('Instances'):
+ instances = False
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
+
+ delete_asg(connection, group_name, force_delete=False)
+ while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
+ time.sleep(5)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
+ return True
+
+ return False
+
+
+def get_chunks(items, n):
+ for i in range(0, len(items), n):
+ yield items[i:i + n]
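+# For illustration: list(get_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]],
+# which is how replace() slices the instance list into rolling-update batches.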
+
+
+def update_size(connection, group, max_size, min_size, dc):
+ module.debug("setting ASG sizes")
+ module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
+ updated_group = dict()
+ updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
+ updated_group['MinSize'] = min_size
+ updated_group['MaxSize'] = max_size
+ updated_group['DesiredCapacity'] = dc
+ update_asg(connection, **updated_group)
+
+
+def replace(connection):
+ batch_size = module.params.get('replace_batch_size')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+ group_name = module.params.get('name')
+ max_size = module.params.get('max_size')
+ min_size = module.params.get('min_size')
+ desired_capacity = module.params.get('desired_capacity')
+ launch_config_name = module.params.get('launch_config_name')
+
+ # lc_check defaults to 'true'; only honour it when a launch config is actually in use
+ if launch_config_name:
+ lc_check = module.params.get('lc_check')
+ else:
+ lc_check = False
+ # Mirror above behavior for Launch Templates
+ launch_template = module.params.get('launch_template')
+ if launch_template:
+ lt_check = module.params.get('lt_check')
+ else:
+ lt_check = False
+ replace_instances = module.params.get('replace_instances')
+ replace_all_instances = module.params.get('replace_all_instances')
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+
+ props = get_properties(as_group)
+ instances = props['instances']
+ if replace_all_instances:
+ # If replacing all instances, then set replace_instances to the current set.
+ # This allows replace_instances and replace_all_instances to behave the same.
+ replace_instances = instances
+ if replace_instances:
+ instances = replace_instances
+
+ # check to see if instances are replaceable if checking launch configs
+ if launch_config_name:
+ new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
+ elif launch_template:
+ new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)
+
+ num_new_inst_needed = desired_capacity - len(new_instances)
+
+ if lc_check or lt_check:
+ if num_new_inst_needed == 0 and old_instances:
+ module.debug("No new instances needed, but old instances are present. Removing old instances")
+ terminate_batch(connection, old_instances, instances, True)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ changed = True
+ return changed, props
+
+ # we don't want to spin up extra instances if not necessary
+ if num_new_inst_needed < batch_size:
+ module.debug("Overriding batch size to %s" % num_new_inst_needed)
+ batch_size = num_new_inst_needed
+
+ if not old_instances:
+ changed = False
+ return changed, props
+
+ # check if min_size/max_size/desired capacity have been specified and if not use ASG values
+ if min_size is None:
+ min_size = as_group['MinSize']
+ if max_size is None:
+ max_size = as_group['MaxSize']
+
+ # set temporary settings and wait for them to be reached
+ # This should get overwritten if the number of instances left is less than the batch size.
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
+
+ if wait_for_instances:
+ wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
+ wait_for_elb(connection, group_name)
+ wait_for_target_group(connection, group_name)
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ instances = props['instances']
+ if replace_instances:
+ instances = replace_instances
+
+ module.debug("beginning main loop")
+ for i in get_chunks(instances, batch_size):
+ # terminate_batch returns break_early=True once enough new instances exist
+ break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
+
+ if wait_for_instances:
+ wait_for_term_inst(connection, term_instances)
+ wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
+ wait_for_elb(connection, group_name)
+ wait_for_target_group(connection, group_name)
+
+ if break_early:
+ module.debug("breaking loop")
+ break
+
+ update_size(connection, as_group, max_size, min_size, desired_capacity)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ asg_properties = get_properties(as_group)
+ module.debug("Rolling update complete.")
+ changed = True
+ return changed, asg_properties
+
+
+def detach(connection):
+ group_name = module.params.get('name')
+ detach_instances = module.params.get('detach_instances')
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ decrement_desired_capacity = module.params.get('decrement_desired_capacity')
+ min_size = module.params.get('min_size')
+ props = get_properties(as_group)
+ instances = props['instances']
+
+ # check if provided instance exists in asg, create list of instances to detach which exist in asg
+ instances_to_detach = []
+ for instance_id in detach_instances:
+ if instance_id in instances:
+ instances_to_detach.append(instance_id)
+
+ # check if setting decrement_desired_capacity will make desired_capacity smaller
+ # than the currently set minimum size in ASG configuration
+ if decrement_desired_capacity:
+ decremented_desired_capacity = len(instances) - len(instances_to_detach)
+ if min_size and min_size > decremented_desired_capacity:
+ module.fail_json(
+ msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\
+ which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size))
+
+ if instances_to_detach:
+ try:
+ detach_asg_instances(connection, instances_to_detach, group_name, decrement_desired_capacity)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to detach instances from AutoScaling Group")
+
+ asg_properties = get_properties(as_group)
+ return True, asg_properties
+
+
+def get_instances_by_launch_config(props, lc_check, initial_instances):
+ new_instances = []
+ old_instances = []
+ # old instances are those that have the old launch config
+ if lc_check:
+ for i in props['instances']:
+ # Check if migrating from launch_template to launch_config first
+ if 'launch_template' in props['instance_facts'][i]:
+ old_instances.append(i)
+ elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ else:
+ module.debug("Comparing initial instances with current: %s" % initial_instances)
+ for i in props['instances']:
+ if i not in initial_instances:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+ module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+ return new_instances, old_instances
+
+
+def get_instances_by_launch_template(props, lt_check, initial_instances):
+ new_instances = []
+ old_instances = []
+ # old instances are those that have the old launch template or version of the same launch template
+ if lt_check:
+ for i in props['instances']:
+ # Check if migrating from launch_config_name to launch_template_name first
+ if 'launch_config_name' in props['instance_facts'][i]:
+ old_instances.append(i)
+ elif props['instance_facts'][i].get('launch_template') == props['launch_template']:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+ else:
+ module.debug("Comparing initial instances with current: %s" % initial_instances)
+ for i in props['instances']:
+ if i not in initial_instances:
+ new_instances.append(i)
+ else:
+ old_instances.append(i)
+
+ module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+ module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+ return new_instances, old_instances
+
+
+def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
+ instances_to_terminate = []
+ instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
+ # check to make sure instances given are actually in the given ASG
+ # and they have a non-current launch config
+ if module.params.get('launch_config_name'):
+ if lc_check:
+ for i in instances:
+ if (
+ 'launch_template' in props['instance_facts'][i]
+ or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
+ ):
+ instances_to_terminate.append(i)
+ else:
+ for i in instances:
+ if i in initial_instances:
+ instances_to_terminate.append(i)
+ elif module.params.get('launch_template'):
+ if lt_check:
+ for i in instances:
+ if (
+ 'launch_config_name' in props['instance_facts'][i]
+ or props['instance_facts'][i]['launch_template'] != props['launch_template']
+ ):
+ instances_to_terminate.append(i)
+ else:
+ for i in instances:
+ if i in initial_instances:
+ instances_to_terminate.append(i)
+
+ return instances_to_terminate
+
+
+def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
+ batch_size = module.params.get('replace_batch_size')
+ min_size = module.params.get('min_size')
+ desired_capacity = module.params.get('desired_capacity')
+ group_name = module.params.get('name')
+ lc_check = module.params.get('lc_check')
+ lt_check = module.params.get('lt_check')
+ decrement_capacity = False
+ break_loop = False
+
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ if desired_capacity is None:
+ desired_capacity = as_group['DesiredCapacity']
+
+ props = get_properties(as_group)
+ desired_size = as_group['MinSize']
+ if module.params.get('launch_config_name'):
+ new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
+ else:
+ new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
+ num_new_inst_needed = desired_capacity - len(new_instances)
+
+ # check to make sure instances given are actually in the given ASG
+ # and they have a non-current launch config
+ instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)
+
+ module.debug("new instances needed: %s" % num_new_inst_needed)
+ module.debug("new instances: %s" % new_instances)
+ module.debug("old instances: %s" % old_instances)
+ module.debug("batch instances: %s" % ",".join(instances_to_terminate))
+
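+ # Three cases follow:
+ # 1) no new instances needed -> terminate the remaining old ones, decrementing capacity
+ # 2) fewer needed than the batch size -> trim the terminate list, keep capacity
+ # 3) otherwise -> terminate a full batch without decrementing capacity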
+ if num_new_inst_needed == 0:
+ decrement_capacity = True
+ if as_group['MinSize'] != min_size:
+ if min_size is None:
+ min_size = as_group['MinSize']
+ updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
+ update_asg(connection, **updated_params)
+ module.debug("Updating minimum size back to original of %s" % min_size)
+ # if there are some leftover old instances, but we are already at capacity
+ # with new ones, we don't want to decrement capacity
+ if leftovers:
+ decrement_capacity = False
+ break_loop = True
+ instances_to_terminate = old_instances
+ desired_size = min_size
+ module.debug("No new instances needed")
+
+ if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
+ instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
+ decrement_capacity = False
+ break_loop = False
+ module.debug("%s new instances needed" % num_new_inst_needed)
+
+ module.debug("decrementing capacity: %s" % decrement_capacity)
+
+ for instance_id in instances_to_terminate:
+ elb_dreg(connection, group_name, instance_id)
+ module.debug("terminating instance: %s" % instance_id)
+ terminate_asg_instance(connection, instance_id, decrement_capacity)
+
+ # the caller (wait_for_term_inst) waits until the instances we marked
+ # for termination are no longer in the group
+
+ return break_loop, desired_size, instances_to_terminate
+
+
+def wait_for_term_inst(connection, term_instances):
+ wait_timeout = module.params.get('wait_timeout')
+ group_name = module.params.get('name')
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ count = 1
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and count > 0:
+ module.debug("waiting for instances to terminate")
+ count = 0
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ instance_facts = props['instance_facts']
+ instances = (i for i in instance_facts if i in term_instances)
+ for i in instances:
+ lifecycle = instance_facts[i]['lifecycle_state']
+ health = instance_facts[i]['health_status']
+ module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
+ if lifecycle.startswith('Terminating') or health == 'Unhealthy':
+ count += 1
+ time.sleep(10)
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
+
+
+def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
+ # make sure we have the latest stats after that last loop.
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
+ # now we make sure that we have enough instances in a viable state
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and desired_size > props[prop]:
+ module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
+ time.sleep(10)
+ as_group = describe_autoscaling_groups(connection, group_name)[0]
+ props = get_properties(as_group)
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
+ module.debug("Reached %s: %s" % (prop, desired_size))
+ return props
+
+
+def asg_exists(connection):
+ group_name = module.params.get('name')
+ as_group = describe_autoscaling_groups(connection, group_name)
+ return bool(as_group)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ load_balancers=dict(type='list', elements='str'),
+ target_group_arns=dict(type='list', elements='str'),
+ availability_zones=dict(type='list', elements='str'),
+ launch_config_name=dict(type='str'),
+ launch_template=dict(
+ type='dict',
+ default=None,
+ options=dict(
+ version=dict(type='str'),
+ launch_template_name=dict(type='str'),
+ launch_template_id=dict(type='str'),
+ )
+ ),
+ min_size=dict(type='int'),
+ max_size=dict(type='int'),
+ max_instance_lifetime=dict(type='int'),
+ mixed_instances_policy=dict(
+ type='dict',
+ default=None,
+ options=dict(
+ instance_types=dict(
+ type='list',
+ elements='str'
+ ),
+ instances_distribution=dict(
+ type='dict',
+ default=None,
+ options=dict(
+ on_demand_allocation_strategy=dict(type='str'),
+ on_demand_base_capacity=dict(type='int'),
+ on_demand_percentage_above_base_capacity=dict(type='int'),
+ spot_allocation_strategy=dict(type='str'),
+ spot_instance_pools=dict(type='int'),
+ spot_max_price=dict(type='str'),
+ )
+ )
+ )
+ ),
+ placement_group=dict(type='str'),
+ desired_capacity=dict(type='int'),
+ vpc_zone_identifier=dict(type='list', elements='str'),
+ replace_batch_size=dict(type='int', default=1),
+ replace_all_instances=dict(type='bool', default=False),
+ replace_instances=dict(type='list', default=[], elements='str'),
+ detach_instances=dict(type='list', default=[], elements='str'),
+ decrement_desired_capacity=dict(type='bool', default=False),
+ lc_check=dict(type='bool', default=True),
+ lt_check=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list', default=[], elements='dict'),
+ purge_tags=dict(type='bool', default=False),
+ health_check_period=dict(type='int', default=300),
+ health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
+ default_cooldown=dict(type='int', default=300),
+ wait_for_instances=dict(type='bool', default=True),
+ termination_policies=dict(type='list', default=['Default'], elements='str'),
+ notification_topic=dict(type='str', default=None),
+ notification_types=dict(
+ type='list',
+ default=[
+ 'autoscaling:EC2_INSTANCE_LAUNCH',
+ 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
+ 'autoscaling:EC2_INSTANCE_TERMINATE',
+ 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
+ ],
+ elements='str'
+ ),
+ suspend_processes=dict(type='list', default=[], elements='str'),
+ metrics_collection=dict(type='bool', default=False),
+ metrics_granularity=dict(type='str', default='1Minute'),
+ metrics_list=dict(
+ type='list',
+ default=[
+ 'GroupMinSize',
+ 'GroupMaxSize',
+ 'GroupDesiredCapacity',
+ 'GroupInServiceInstances',
+ 'GroupPendingInstances',
+ 'GroupStandbyInstances',
+ 'GroupTerminatingInstances',
+ 'GroupTotalInstances'
+ ],
+ elements='str'
+ )
+ )
+
+ global module
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['replace_all_instances', 'replace_instances'],
+ ['replace_all_instances', 'detach_instances'],
+ ['launch_config_name', 'launch_template'],
+ ]
+ )
+
+ state = module.params.get('state')
+ replace_instances = module.params.get('replace_instances')
+ replace_all_instances = module.params.get('replace_all_instances')
+ detach_instances = module.params.get('detach_instances')
+
+ connection = module.client('autoscaling')
+ changed = create_changed = replace_changed = detach_changed = False
+ exists = asg_exists(connection)
+
+ if state == 'present':
+ create_changed, asg_properties = create_autoscaling_group(connection)
+ elif state == 'absent':
+ changed = delete_autoscaling_group(connection)
+ module.exit_json(changed=changed)
+
+ # Only replace instances if asg existed at start of call
+ if (
+ exists
+ and (replace_all_instances or replace_instances)
+ and (module.params.get('launch_config_name') or module.params.get('launch_template'))
+ ):
+ replace_changed, asg_properties = replace(connection)
+
+ # Only detach instances if asg existed at start of call
+ if (
+ exists
+ and (detach_instances)
+ and (module.params.get('launch_config_name') or module.params.get('launch_template'))
+ ):
+ detach_changed, asg_properties = detach(connection)
+
+ if create_changed or replace_changed or detach_changed:
+ changed = True
+
+ module.exit_json(changed=changed, **asg_properties)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py
new file mode 100644
index 00000000..17fc4bec
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py
@@ -0,0 +1,460 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: autoscaling_group_info
+version_added: 5.0.0
+short_description: Gather information about EC2 Auto Scaling Groups (ASGs) in AWS
+description:
+ - Gather information about EC2 Auto Scaling Groups (ASGs) in AWS.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_info).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Rob White (@wimnat)"
+options:
+ name:
+ description:
+ - The prefix or name of the auto scaling group(s) you are searching for.
+ - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+ type: str
+ required: false
+ tags:
+ description:
+ - >
+ A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+ group(s) you are searching for.
+ required: false
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find all groups
+ amazon.aws.autoscaling_group_info:
+ register: asgs
+
+- name: Find a group with matching name/prefix
+ amazon.aws.autoscaling_group_info:
+ name: public-webserver-asg
+ register: asgs
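+
+# The name is matched as a regular expression with an implicit '^',
+# so append '$' to require an exact name match instead of a prefix match.
+- name: Find a group with an exact name match
+ amazon.aws.autoscaling_group_info:
+ name: public-webserver-asg$
+ register: asgs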
+
+- name: Find a group with matching tags
+ amazon.aws.autoscaling_group_info:
+ tags:
+ project: webapp
+ env: production
+ register: asgs
+
+- name: Find a group with matching name/prefix and tags
+ amazon.aws.autoscaling_group_info:
+ name: myproject
+ tags:
+ env: production
+ register: asgs
+
+- name: Fail if no groups are found
+ amazon.aws.autoscaling_group_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length == 0 }}"
+
+- name: Fail if more than 1 group is found
+ amazon.aws.autoscaling_group_info:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+ description: The Amazon Resource Name of the ASG
+ returned: success
+ type: str
+ sample: "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+ description: Name of autoscaling group
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+availability_zones:
+ description: List of Availability Zones that are enabled for this ASG.
+ returned: success
+ type: list
+ sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
+created_time:
+ description: The date and time this ASG was created, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+health_check_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+health_check_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+instances:
+ description: List of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-es22ad25",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": "false"
+ }
+ ]
+launch_config_name:
+ description: >
+ Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+ provided for compatibility with M(amazon.aws.autoscaling_group) module.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+launch_configuration_name:
+ description: Name of launch configuration associated with the ASG.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+lifecycle_hooks:
+ description: List of lifecycle hooks for the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "AutoScalingGroupName": "public-webapp-production-1",
+ "DefaultResult": "ABANDON",
+ "GlobalTimeout": 172800,
+ "HeartbeatTimeout": 3600,
+ "LifecycleHookName": "instance-launch",
+ "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING"
+ },
+ {
+ "AutoScalingGroupName": "public-webapp-production-1",
+ "DefaultResult": "ABANDON",
+ "GlobalTimeout": 172800,
+ "HeartbeatTimeout": 3600,
+ "LifecycleHookName": "instance-terminate",
+ "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING"
+ }
+ ]
+load_balancer_names:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+new_instances_protected_from_scale_in:
+ description: Whether or not new instances are protected from automatic scale-in.
+ returned: success
+ type: bool
+ sample: "false"
+placement_group:
+ description: Placement group into which instances are launched, if any.
+ returned: success
+ type: str
+ sample: None
+status:
+ description: The current state of the group when DeleteAutoScalingGroup is in progress.
+ returned: success
+ type: str
+ sample: None
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+target_group_arns:
+ description: List of ARNs of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+ ]
+target_group_names:
+ description: List of names of the target groups that the ASG populates
+ returned: success
+ type: list
+ sample: [
+ "target-group-host-hello",
+ "target-group-path-world"
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+ type: list
+ sample: ["Default"]
+'''
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def match_asg_tags(tags_to_match, asg):
+ for key, value in tags_to_match.items():
+ for tag in asg['Tags']:
+ if key == tag['Key'] and value == tag['Value']:
+ break
+ else:
+ return False
+ return True
+
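+# For illustration: match_asg_tags({'env': 'production'}, asg) returns True only when
+# every requested key/value pair appears in the ASG's 'Tags' list.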
+
+def find_asgs(conn, module, name=None, tags=None):
+ """
+ Args:
+ conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+ module (AnsibleAWSModule): Instantiated module object, used for error reporting.
+ name (str): Optional name of the ASG you are looking for.
+ tags (dict): Optional dictionary of tags and values to search for.
+
+ Basic Usage:
+ >>> name = 'public-webapp-production'
+ >>> tags = { 'env': 'production' }
+ >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+ >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+ Returns:
+ List
+ [
+ {
+ "auto_scaling_group_arn": (
+ "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+ "autoScalingGroupName/public-webapp-production"
+ ),
+ "auto_scaling_group_name": "public-webapp-production",
+ "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+ "created_time": "2016-02-02T23:28:42.481000+00:00",
+ "default_cooldown": 300,
+ "desired_capacity": 2,
+ "enabled_metrics": [],
+ "health_check_grace_period": 300,
+ "health_check_type": "ELB",
+ "instances":
+ [
+ {
+ "availability_zone": "us-west-2c",
+ "health_status": "Healthy",
+ "instance_id": "i-047a12cb",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ },
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-7a29df2c",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": false
+ }
+ ],
+ "launch_config_name": "public-webapp-production-1",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_hooks":
+ [
+ {
+ "AutoScalingGroupName": "public-webapp-production-1",
+ "DefaultResult": "ABANDON",
+ "GlobalTimeout": 172800,
+ "HeartbeatTimeout": 3600,
+ "LifecycleHookName": "instance-launch",
+ "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING"
+ },
+ {
+ "AutoScalingGroupName": "public-webapp-production-1",
+ "DefaultResult": "ABANDON",
+ "GlobalTimeout": 172800,
+ "HeartbeatTimeout": 3600,
+ "LifecycleHookName": "instance-terminate",
+ "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING"
+ }
+ ],
+ "load_balancer_names": ["public-webapp-production-lb"],
+ "max_size": 4,
+ "min_size": 2,
+ "new_instances_protected_from_scale_in": false,
+ "placement_group": None,
+ "status": None,
+ "suspended_processes": [],
+ "tags":
+ [
+ {
+ "key": "Name",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "public-webapp-production"
+ },
+ {
+ "key": "env",
+ "propagate_at_launch": true,
+ "resource_id": "public-webapp-production",
+ "resource_type": "auto-scaling-group",
+ "value": "production"
+ }
+ ],
+ "target_group_names": [],
+ "target_group_arns": [],
+ "termination_policies":
+ [
+ "Default"
+ ],
+ "vpc_zone_identifier":
+ [
+ "subnet-a1b1c1d1",
+ "subnet-a2b2c2d2",
+ "subnet-a3b3c3d3"
+ ]
+ }
+ ]
+ """
+
+ try:
+ asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
+ asgs = asgs_paginator.paginate().build_full_result()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
+
+ if not asgs:
+ return asgs
+
+ try:
+ elbv2 = module.client('elbv2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ # This is nice to have, not essential
+ elbv2 = None
+ matched_asgs = []
+
+ if name is not None:
+ # compile a regex that matches from the start of the group name
+ name_prog = re.compile(r'^' + name)
+
+ for asg in asgs['AutoScalingGroups']:
+ if name:
+ matched_name = name_prog.search(asg['AutoScalingGroupName'])
+ else:
+ matched_name = True
+
+ if tags:
+ matched_tags = match_asg_tags(tags, asg)
+ else:
+ matched_tags = True
+
+ if matched_name and matched_tags:
+ asg = camel_dict_to_snake_dict(asg)
+ # compatibility with autoscaling_group module
+ if 'launch_configuration_name' in asg:
+ asg['launch_config_name'] = asg['launch_configuration_name']
+ # workaround for https://github.com/ansible/ansible/pull/25015
+ if 'target_group_ar_ns' in asg:
+ asg['target_group_arns'] = asg['target_group_ar_ns']
+ del asg['target_group_ar_ns']
+ if asg.get('target_group_arns'):
+ if elbv2:
+ try:
+ tg_paginator = elbv2.get_paginator('describe_target_groups')
+ tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
+ asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']]
+ except is_boto3_error_code('TargetGroupNotFound'):
+ asg['target_group_names'] = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe Target Groups")
+ else:
+ asg['target_group_names'] = []
+ # get asg lifecycle hooks if any
+ try:
+ asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name'])
+ asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks")
+ matched_asgs.append(asg)
+
+ return matched_asgs
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ asg_name = module.params.get('name')
+ asg_tags = module.params.get('tags')
+
+ autoscaling = module.client('autoscaling')
+
+ results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
+ module.exit_json(results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py
new file mode 100644
index 00000000..246321b5
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS
+version_added: 1.0.0
+description:
+ - Gather information about availability zones in AWS.
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all availability zones
+ amazon.aws.aws_az_info:
+
+- name: Gather information about a single availability zone
+ amazon.aws.aws_az_info:
+ filters:
+ zone-name: eu-west-1a
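+
+- name: Gather the same zone using an underscore filter key (converted to 'zone-name' internally)
+ amazon.aws.aws_az_info:
+ filters:
+ zone_name: eu-west-1a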
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+ Availability zones that match the provided filters. Each element consists of a dict with all the information
+ related to that availability zone.
+ type: list
+ elements: dict
+ contains:
+ state:
+ description:
+ - The state of the availability zone.
+ - The value is always C(available).
+ type: str
+ returned: on success
+ sample: 'available'
+ opt_in_status:
+ description:
+ - The opt-in status.
+ - The value is always C(opt-in-not-required) for availability zones.
+ type: str
+ returned: on success
+ sample: 'opt-in-not-required'
+ messages:
+ description: List of messages about the availability zone.
+ type: list
+ elements: dict
+ contains:
+ message:
+ description: The message about the availability zone.
+ type: str
+ returned: on success
+ sample: 'msg'
+ returned: on success
+ sample: [
+ {
+ 'message': 'message_one'
+ },
+ {
+ 'message': 'message_two'
+ }
+ ]
+ region_name:
+ description: The name of the region.
+ type: str
+ returned: on success
+ sample: 'us-east-1'
+ zone_name:
+ description: The name of the availability zone.
+ type: str
+ returned: on success
+ sample: 'us-east-1e'
+ zone_id:
+ description: The ID of the availability zone.
+ type: str
+ returned: on success
+ sample: 'use1-az5'
+ group_name:
+ description:
+ - The name of the associated group.
+ - For availability zones, this will be the same as I(region_name).
+ type: str
+ returned: on success
+ sample: 'us-east-1'
+ network_border_group:
+ description: The name of the network border group.
+ type: str
+ returned: on success
+ sample: 'us-east-1'
+ zone_type:
+ description: The type of zone.
+ type: str
+ returned: on success
+ sample: 'availability-zone'
+ sample: [
+ {
+ "group_name": "us-east-1",
+ "messages": [],
+ "network_border_group": "us-east-1",
+ "opt_in_status": "opt-in-not-required",
+ "region_name": "us-east-1",
+ "state": "available",
+ "zone_id": "use1-az6",
+ "zone_name": "us-east-1a",
+ "zone_type": "availability-zone"
+ },
+ {
+ "group_name": "us-east-1",
+ "messages": [],
+ "network_border_group": "us-east-1",
+ "opt_in_status": "opt-in-not-required",
+ "region_name": "us-east-1",
+ "state": "available",
+ "zone_id": "use1-az1",
+ "zone_name": "us-east-1b",
+ "zone_type": "availability-zone"
+ }
+ ]
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
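+ # For illustration: {'zone_name': 'eu-west-1a'} becomes {'zone-name': 'eu-west-1a'}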
+
+ try:
+ availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
new file mode 100644
index 00000000..3c669160
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_caller_info
+version_added: 1.0.0
+short_description: Get information about the user and account being used to make AWS calls
+description:
+ - This module returns information about the account and user / role from which the AWS access tokens originate.
+ - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
+
+author:
+ - Ed Costello (@orthanc)
+ - Stijn Dubrul (@sdubrul)
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get the current caller identity information
+ amazon.aws.aws_caller_info:
+ register: caller_info
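+
+# The registered result exposes the account id (see RETURN), which can then be
+# templated into ARNs; the role name below is only an example.
+- name: Use the gathered account id when building an ARN
+ ansible.builtin.debug:
+ msg: "arn:aws:iam::{{ caller_info.account }}:role/my-example-role"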
+'''
+
+RETURN = '''
+account:
+ description: The account id the access credentials are associated with.
+ returned: success
+ type: str
+ sample: "123456789012"
+account_alias:
+ description: The account alias the access credentials are associated with.
+ returned: when caller has the iam:ListAccountAliases permission
+ type: str
+ sample: "acme-production"
+arn:
+ description: The arn identifying the user the credentials are associated with.
+ returned: success
+ type: str
+ sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
+user_id:
+ description: |
+ The user id the access credentials are associated with. Note that this may not correspond to
+ anything you can look up in the case of roles or federated identities.
+ returned: success
+ type: str
+ sample: 123456789012:my-federated-user-name
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ caller_info = client.get_caller_identity(aws_retry=True)
+ caller_info.pop('ResponseMetadata', None)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve caller identity')
+
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ # Although a list is returned by list_account_aliases AWS supports maximum one alias per account.
+ # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias.
+ # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output
+ response = iam_client.list_account_aliases(aws_retry=True)
+ if response and response['AccountAliases']:
+ caller_info['account_alias'] = response['AccountAliases'][0]
+ else:
+ caller_info['account_alias'] = ''
+ except (BotoCoreError, ClientError):
+ # The iam:ListAccountAliases permission is required for this operation to succeed.
+ # Lacking this permission is handled gracefully by not returning the account_alias.
+ pass
+
+ module.exit_json(
+ changed=False,
+ **camel_dict_to_snake_dict(caller_info))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
new file mode 100644
index 00000000..f953a75d
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
@@ -0,0 +1,794 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+version_added: 1.0.0
+short_description: Create or delete an AWS CloudFormation stack
+description:
+ - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+options:
+ stack_name:
+ description:
+ - Name of the CloudFormation stack.
+ required: true
+ type: str
+ disable_rollback:
+ description:
+ - If a stack fails to form, rollback will remove the stack.
+ default: false
+ type: bool
+ on_create_failure:
+ description:
+ - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+ choices:
+ - DO_NOTHING
+ - ROLLBACK
+ - DELETE
+ type: str
+ create_timeout:
+ description:
+ - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+ type: int
+ template_parameters:
+ description:
+ - A dictionary of all the template variables for the stack. The value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created.
+ - If I(state=present) and if stack exists and template has changed, it will be updated.
+ - If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template),
+ I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ type: path
+ notification_arns:
+ description:
+ - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+ type: str
+ stack_policy:
+ description:
+ - The path of the file containing the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified;
+ for instance, to allow all updates see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051).
+ type: str
+ stack_policy_body:
+ description:
+ - The CloudFormation stack policy in JSON. A policy cannot be removed once placed, but it can be modified;
+ for instance, to allow all updates see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051).
+ type: json
+ version_added: 1.5.0
+ stack_policy_on_update_body:
+ description:
+ - The body of the CloudFormation stack policy, applied only during this update.
+ type: json
+ version_added: 1.5.0
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ type: dict
+ template_url:
+ description:
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+ S3 bucket in the same region as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+ the previous template will be reused.
+ type: str
+ create_changeset:
+ description:
+ - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+ - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
+ deleted immediately with no changeset."
+ type: bool
+ default: false
+ changeset_name:
+ description:
+ - Name given to the changeset when creating a changeset.
+ - Only used when I(create_changeset=true).
+ - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+ See the AWS Change Sets docs for more information
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+ type: str
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ type: str
+ termination_protection:
+ description:
+ - Enable or disable termination protection on the stack.
+ type: bool
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ events_limit:
+ description:
+ - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+ default: 200
+ type: int
+ backoff_delay:
+ description:
+ - Number of seconds to wait before the next retry.
+ default: 3
+ type: int
+ required: False
+ backoff_max_delay:
+ description:
+ - Maximum amount of time to wait between retries.
+ default: 30
+ type: int
+ required: False
+ backoff_retries:
+ description:
+ - Number of times to retry operation.
+ - The AWS API throttling mechanism can fail CloudFormation requests, so the module retries a number of times.
+ default: 10
+ type: int
+ required: False
+ capabilities:
+ description:
+ - Specify capabilities that stack template contains.
+ - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+ type: list
+ elements: str
+ default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author:
+ - "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ KeyName: "jmartin"
+ DiskType: "ephemeral"
+ InstanceType: "m1.small"
+ ClusterSize: 3
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "roles/cloudformation/files/cloudformation-example.json"
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation-old"
+ state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via an URL
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (the default), the given value is used.
+- amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ DBSnapshotIdentifier:
+ use_previous_value: True
+        value: arn:aws:rds:us-east-1:123456789012:snapshot:rds:my-db-snapshot
+ DBName:
+ use_previous_value: True
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ termination_protection: true
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: set a timeout before stack creation fails
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ create_timeout: 5
+
+# Configure rollback behaviour on unsuccessful stack creation, allowing
+# CloudFormation to clean up or to do nothing in the event of a failed
+# deployment.
+# In this case, on_create_failure is set to "DELETE", so CloudFormation will
+# clean up the stack if it fails to create.
+- name: create stack which will delete on creation failure
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ on_create_failure: DELETE
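+
+# The capabilities option defaults to CAPABILITY_IAM and CAPABILITY_NAMED_IAM.
+# A minimal sketch of overriding it for a template that uses macros; the stack,
+# bucket and template names are placeholders.
+- name: create a stack whose template requires CAPABILITY_AUTO_EXPAND
+  amazon.aws.cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    capabilities:
+      - CAPABILITY_IAM
+      - CAPABILITY_AUTO_EXPAND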
+'''
+
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+change_set_id:
+  description: The ID of the stack change set, if one was created.
+ returned: I(state=present) and I(create_changeset=true)
+ type: str
+ sample: "arn:aws:cloudformation:us-east-1:123456789012:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import traceback
+import uuid
+from hashlib import sha1
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
+
+# Set a default, mostly for our integration tests. This will be overridden in
+# the main() loop to match the parameters we're passed
+retry_decorator = AWSRetry.jittered_backoff()
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+    '''This event data was never correct; it only worked as a side effect, so the v2.3 format is different.'''
+ ret = {'events': [], 'log': []}
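+    # 'events' collects human-readable "StackEvent <type> <id> <status>" lines;
+    # 'log' collects failure details and other diagnostics for the module output.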
+
+ try:
+ pg = cfn.get_paginator(
+ 'describe_stack_events'
+ ).paginate(
+ StackName=stack_name,
+ PaginationConfig={'MaxItems': events_limit}
+ )
+ if token_filter is not None:
+ events = list(retry_decorator(pg.search)(
+ "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+ ))
+ else:
+ events = list(pg.search("StackEvents[*]"))
+ except is_boto3_error_message('does not exist'):
+ ret['log'].append('Stack does not exist.')
+ return ret
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except
+ error_msg = boto_exception(err)
+ ret['log'].append('Unknown error: ' + str(error_msg))
+ return ret
+
+ for e in events:
+ eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+ ret['events'].append(eventline)
+
+ if e['ResourceStatus'].endswith('FAILED'):
+ failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+ ret['log'].append(failline)
+
+ return ret
+
+
+def create_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
+
+ # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
+ # 'OnFailure' only apply on creation, not update.
+ if module.params.get('on_create_failure') is not None:
+ stack_params['OnFailure'] = module.params['on_create_failure']
+ else:
+ stack_params['DisableRollback'] = module.params['disable_rollback']
+
+ if module.params.get('create_timeout') is not None:
+ stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+ if module.params.get('termination_protection') is not None:
+ stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+
+ try:
+ response = cfn.create_stack(aws_retry=True, **stack_params)
+ # Use stack ID to follow stack state in case of on_create_failure = DELETE
+ result = stack_operation(module, cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName')))
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def list_changesets(cfn, stack_name):
+ res = cfn.list_change_sets(aws_retry=True, StackName=stack_name)
+ return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template' or 'template_url' is required.")
+ if module.params['changeset_name'] is not None:
+ stack_params['ChangeSetName'] = module.params['changeset_name']
+
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ changeset_name = build_changeset_name(stack_params)
+ stack_params['ChangeSetName'] = changeset_name
+
+ # Determine if this changeset already exists
+ pending_changesets = list_changesets(cfn, stack_params['StackName'])
+ if changeset_name in pending_changesets:
+ warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+ result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
+ else:
+ cs = cfn.create_change_set(aws_retry=True, **stack_params)
+ # Make sure we don't enter an infinite loop
+ time_end = time.time() + 600
+ while time.time() < time_end:
+ try:
+ newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs['Id'])
+ except botocore.exceptions.BotoCoreError as err:
+ module.fail_json_aws(err)
+ if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+ time.sleep(1)
+ elif newcs['Status'] == 'FAILED' and ("The submitted information didn't contain changes" in newcs['StatusReason']
+ or "No updates are to be performed" in newcs['StatusReason']):
+ cfn.delete_change_set(aws_retry=True, ChangeSetName=cs['Id'])
+ result = dict(changed=False,
+ output='The created Change Set did not contain any changes to this stack and was deleted.')
+ # a failed change set does not trigger any stack events so we just want to
+ # skip any further processing of result and just return it directly
+ return result
+ else:
+ break
+                # Let's not hog the CPU or spam the AWS API
+ time.sleep(1)
+ result = stack_operation(module, cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+ result['change_set_id'] = cs['Id']
+ result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+ 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+ 'NOTE that dependencies on this stack might fail due to pending changes!']
+ except is_boto3_error_message('No updates are to be performed.'):
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err, msg='Failed to create change set')
+
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ stack_params['UsePreviousTemplate'] = True
+
+ if module.params['stack_policy_on_update_body'] is not None:
+ stack_params['StackPolicyDuringUpdateBody'] = module.params['stack_policy_on_update_body']
+
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack(aws_retry=True, **stack_params)
+ result = stack_operation(module, cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except is_boto3_error_message('No updates are to be performed.'):
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName')))
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
+ '''updates termination protection of a stack'''
+ stack = get_stack_facts(module, cfn, stack_name)
+ if stack:
+ if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
+ try:
+ cfn.update_termination_protection(
+ aws_retry=True,
+ EnableTerminationProtection=desired_termination_protection_state,
+ StackName=stack_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=None):
+ '''gets the status of a stack while it is created/updated/deleted'''
+ existed = []
+ while True:
+ try:
+ stack = get_stack_facts(module, cfn, stack_name, raise_errors=True)
+ existed.append('yes')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
+ # If the stack previously existed, and now can't be found then it's
+ # been deleted successfully.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ if not stack:
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
+ return ret
+ # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
+ # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
+ elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
+ ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
+ return ret
+ elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
+ return ret
+ # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
+ elif stack['StackStatus'].endswith('_COMPLETE'):
+ ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+ return ret
+ # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
+ elif stack['StackStatus'].endswith('_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
+ return ret
+ else:
+ # this can loop forever :/
+ time.sleep(5)
+ return {'failed': True, 'output': 'Failed for unknown reasons.'}
+
+
+def build_changeset_name(stack_params):
+ if 'ChangeSetName' in stack_params:
+ return stack_params['ChangeSetName']
+
+ json_params = json.dumps(stack_params, sort_keys=True)
+
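+    # The sha1 of the sorted parameters makes the changeset name deterministic:
+    # identical inputs always yield the same name, which is how create_changeset()
+    # detects an already-pending changeset instead of creating a duplicate.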
+ return 'Ansible-{0}-{1}'.format(
+ stack_params['StackName'],
+ sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
+ )
+
+
+def check_mode_changeset(module, stack_params, cfn):
+ """Create a change set, describe it and delete it before returning check mode outputs."""
+ stack_params['ChangeSetName'] = build_changeset_name(stack_params)
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ change_set = cfn.create_change_set(aws_retry=True, **stack_params)
+ for _i in range(60): # total time 5 min
+ description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set['Id'])
+ if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
+ break
+ time.sleep(5)
+ else:
+ # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
+ module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
+
+ cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set['Id'])
+
+ reason = description.get('StatusReason')
+
+ if description['Status'] == 'FAILED' and ("didn't contain changes" in reason or "No updates are to be performed" in reason):
+ return {'changed': False, 'msg': reason, 'meta': reason}
+ return {'changed': True, 'msg': reason, 'meta': description['Changes']}
+
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err)
+
+
+def get_stack_facts(module, cfn, stack_name, raise_errors=False):
+ try:
+ stack_response = cfn.describe_stacks(aws_retry=True, StackName=stack_name)
+ stack_info = stack_response['Stacks'][0]
+ except is_boto3_error_message('does not exist'):
+ return None
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except
+ if raise_errors:
+ raise err
+ module.fail_json_aws(err, msg="Failed to describe stack")
+
+ if stack_response and stack_response.get('Stacks', None):
+ stacks = stack_response['Stacks']
+ if len(stacks):
+ stack_info = stacks[0]
+
+ return stack_info
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(required=True),
+ template_parameters=dict(required=False, type='dict', default={}),
+ state=dict(default='present', choices=['present', 'absent']),
+ template=dict(default=None, required=False, type='path'),
+ notification_arns=dict(default=None, required=False),
+ stack_policy=dict(default=None, required=False),
+ stack_policy_body=dict(default=None, required=False, type='json'),
+ stack_policy_on_update_body=dict(default=None, required=False, type='json'),
+ disable_rollback=dict(default=False, type='bool'),
+ on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
+ create_timeout=dict(default=None, type='int'),
+ template_url=dict(default=None, required=False),
+ template_body=dict(default=None, required=False),
+ create_changeset=dict(default=False, type='bool'),
+ changeset_name=dict(default=None, required=False),
+ role_arn=dict(default=None, required=False),
+ tags=dict(default=None, type='dict'),
+ termination_protection=dict(default=None, type='bool'),
+ events_limit=dict(default=200, type='int'),
+ backoff_retries=dict(type='int', default=10, required=False),
+ backoff_delay=dict(type='int', default=3, required=False),
+ backoff_max_delay=dict(type='int', default=30, required=False),
+ capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body'],
+ ['disable_rollback', 'on_create_failure']],
+ supports_check_mode=True
+ )
+
+ invalid_capabilities = []
+ user_capabilities = module.params.get('capabilities')
+ for user_cap in user_capabilities:
+ if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
+ invalid_capabilities.append(user_cap)
+
+ if invalid_capabilities:
+ module.fail_json(msg="Specified capabilities are invalid : %r,"
+ " please check documentation for valid capabilities" % invalid_capabilities)
+
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {
+ 'Capabilities': user_capabilities,
+ 'ClientRequestToken': to_native(uuid.uuid4()),
+ }
+ state = module.params['state']
+ stack_params['StackName'] = module.params['stack_name']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as template_fh:
+ stack_params['TemplateBody'] = template_fh.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+
+ if module.params.get('notification_arns'):
+ stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
+ else:
+ stack_params['NotificationARNs'] = []
+
+ # can't check the policy when verifying.
+ if module.params['stack_policy_body'] is not None and not module.check_mode and not module.params['create_changeset']:
+ stack_params['StackPolicyBody'] = module.params['stack_policy_body']
+ elif module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
+ with open(module.params['stack_policy'], 'r') as stack_policy_fh:
+ stack_params['StackPolicyBody'] = stack_policy_fh.read()
+
+ template_parameters = module.params['template_parameters']
+
+ stack_params['Parameters'] = []
+ for k, v in template_parameters.items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = str(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
+
+ if isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('role_arn'):
+ stack_params['RoleARN'] = module.params['role_arn']
+
+ result = {}
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
+ retry_decorator = AWSRetry.jittered_backoff(
+ retries=module.params.get('backoff_retries'),
+ delay=module.params.get('backoff_delay'),
+ max_delay=module.params.get('backoff_max_delay')
+ )
+ cfn = module.client('cloudformation', retry_decorator=retry_decorator)
+
+ stack_info = get_stack_facts(module, cfn, stack_params['StackName'])
+
+ if module.check_mode:
+ if state == 'absent' and stack_info:
+ module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
+ elif state == 'absent' and not stack_info:
+ module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
+ elif state == 'present' and not stack_info:
+ module.exit_json(changed=True, msg='New stack would be created', meta=[])
+ else:
+ module.exit_json(**check_mode_changeset(module, stack_params, cfn))
+
+ if state == 'present':
+ if not stack_info:
+ result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
+ elif module.params.get('create_changeset'):
+ result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
+ else:
+ if module.params.get('termination_protection') is not None:
+ update_termination_protection(module, cfn, stack_params['StackName'],
+ bool(module.params.get('termination_protection')))
+ result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
+
+ # format the stack output
+
+ stack = get_stack_facts(module, cfn, stack_params['StackName'])
+ if stack is not None:
+ if result.get('stack_outputs') is None:
+ # always define stack_outputs, but it may be empty
+ result['stack_outputs'] = {}
+ for output in stack.get('Outputs', []):
+ result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+ stack_resources = []
+ reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params['StackName'])
+ for res in reslist.get('StackResourceSummaries', []):
+ stack_resources.append({
+ "logical_resource_id": res['LogicalResourceId'],
+ "physical_resource_id": res.get('PhysicalResourceId', ''),
+ "resource_type": res['ResourceType'],
+ "last_updated_time": res['LastUpdatedTimestamp'],
+ "status": res['ResourceStatus'],
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ })
+ result['stack_resources'] = stack_resources
+
+ elif state == 'absent':
+        # absent state is different because of the way delete_stack works.
+        # The problem is that it doesn't give an error if the stack isn't found,
+        # so we must describe the stack first
+
+ try:
+ stack = get_stack_facts(module, cfn, stack_params['StackName'])
+ if not stack:
+ result = {'changed': False, 'output': 'Stack not found.'}
+ else:
+ if stack_params.get('RoleARN') is None:
+ cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'])
+ else:
+ cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+ result = stack_operation(module, cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+ stack_params.get('ClientRequestToken', None))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
new file mode 100644
index 00000000..89ba80bf
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
@@ -0,0 +1,461 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+version_added: 1.0.0
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+      - The name or ID of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+      - Get stack change sets for the stack.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details; see the AWS Guide for details.
+
+- name: Get information on all stacks
+ amazon.aws.cloudformation_info:
+ register: all_stacks_output
+
+- name: Get summary information about a stack
+ amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- amazon.aws.cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get info about a stack but fail if it doesn't exist
+ amazon.aws.cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: true
+ failed_when: cloudformation['nonexistent-stack'] is undefined
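+
+# Fetch stack events and change sets in one call; the stack name is a placeholder
+- amazon.aws.cloudformation_info:
+    stack_name: my-cloudformation-stack
+    stack_events: true
+    stack_change_sets: true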
+'''
+
+RETURN = '''
+cloudformation:
+ description:
+ - Dictionary of dictionaries containing info of stack(s).
+ - Keys are I(stack_name)s.
+ returned: always
+ type: dict
+ contains:
+ stack_description:
+ description: Summary facts about the stack.
+ returned: if the stack exists
+ type: dict
+ contains:
+ capabilities:
+ description: The capabilities allowed in the stack.
+ returned: always
+ type: list
+ elements: str
+ creation_time:
+ description: The time at which the stack was created.
+ returned: if stack exists
+ type: str
+ deletion_time:
+ description: The time at which the stack was deleted.
+ returned: if stack was deleted
+ type: str
+ description:
+ description: The user-defined description associated with the stack.
+ returned: always
+ type: str
+ disable_rollback:
+ description: Whether or not rollback on stack creation failures is enabled.
+ returned: always
+ type: bool
+ drift_information:
+          description: Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration,
+            as defined in the stack template and any values specified as template parameters.
+ returned: always
+ type: dict
+ contains:
+ stack_drift_status:
+ description: Status of the stack's actual configuration compared to its expected template configuration.
+ returned: always
+ type: str
+ last_check_timestamp:
+ description: Most recent time when a drift detection operation was initiated on the stack,
+ or any of its individual resources that support drift detection.
+ returned: if a drift was detected
+ type: str
+ enable_termination_protection:
+ description: Whether termination protection is enabled for the stack.
+ returned: always
+ type: bool
+ notification_arns:
+ description: Amazon SNS topic ARNs to which stack related events are published.
+ returned: always
+ type: list
+ elements: str
+ outputs:
+ description: A list of output dicts.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ output_key:
+ description: The key associated with the output.
+ returned: always
+ type: str
+ output_value:
+ description: The value associated with the output.
+ returned: always
+ type: str
+ parameters:
+ description: A list of parameter dicts.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ parameter_key:
+ description: The key associated with the parameter.
+ returned: always
+ type: str
+ parameter_value:
+ description: The value associated with the parameter.
+ returned: always
+ type: str
+ rollback_configuration:
+ description: The rollback triggers for CloudFormation to monitor during stack creation and updating operations.
+ returned: always
+ type: dict
+ contains:
+ rollback_triggers:
+ description: The triggers to monitor during stack creation or update actions.
+ returned: when rollback triggers exist
+ type: list
+ elements: dict
+ contains:
+ arn:
+ description: The ARN of the rollback trigger.
+ returned: always
+ type: str
+ type:
+ description: The resource type of the rollback trigger.
+ returned: always
+ type: str
+ stack_id:
+ description: The unique ID of the stack.
+ returned: always
+ type: str
+ stack_name:
+ description: The name of the stack.
+ returned: always
+ type: str
+ stack_status:
+ description: The status of the stack.
+ returned: always
+ type: str
+ tags:
+ description: A list of tags associated with the stack.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: Key of tag.
+ returned: always
+ type: str
+ value:
+ description: Value of tag.
+ returned: always
+ type: str
+ stack_outputs:
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
+ output 'OutputValue' parameter.
+ returned: if the stack exists
+ type: dict
+ sample: { ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com }
+ stack_parameters:
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
+ each parameter 'ParameterValue' parameter.
+ returned: if the stack exists
+ type: dict
+ sample:
+ {
+ DatabaseEngine: mysql,
+ DatabasePassword: "***"
+ }
+ stack_events:
+ description: All stack events for the stack.
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+ stack_policy:
+ description: Describes the stack policy for the stack.
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+ stack_template:
+ description: Describes the stack template for the stack.
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+ stack_resource_list:
+ description: Describes stack resources for the stack.
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+ stack_resources:
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
+ resource 'PhysicalResourceId' parameter.
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample: {
+ "AutoScalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
+ "AutoScalingSecurityGroup": "sg-abcd1234",
+ "ApplicationDatabase": "dazvlpr01xj55a"
+ }
+ stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset.
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+ stack_tags:
+ description: Dictionary of key value pairs of tags.
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample: {
+ 'TagOne': 'ValueOne',
+ 'TagTwo': 'ValueTwo'
+ }
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudformation')
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stacks_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_stacks')
+ return paginator.paginate(**kwargs).build_full_result()['Stacks']
+
+ def describe_stacks(self, stack_name=None):
+ try:
+ kwargs = {'StackName': stack_name} if stack_name else {}
+ response = self.describe_stacks_with_backoff(**kwargs)
+ if response is not None:
+ return response
+ self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
+ except is_boto3_error_message('does not exist'):
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_resources_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_stack_resources')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
+
+ def list_stack_resources(self, stack_name):
+ try:
+ return self.list_stack_resources_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_events_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('describe_stack_events')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
+
+ def describe_stack_events(self, stack_name):
+ try:
+ return self.describe_stack_events_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_change_sets_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_change_sets')
+ return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_change_set_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_change_set')
+ return paginator.paginate(**kwargs).build_full_result()
+
+ def describe_stack_change_sets(self, stack_name):
+ changes = []
+ try:
+ change_sets = self.list_stack_change_sets_with_backoff(stack_name)
+ for item in change_sets:
+ changes.append(self.describe_stack_change_set_with_backoff(
+ StackName=stack_name,
+ ChangeSetName=item['ChangeSetName']))
+ return changes
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_stack_policy_with_backoff(self, stack_name):
+ return self.client.get_stack_policy(StackName=stack_name)
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.get_stack_policy_with_backoff(stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_template_with_backoff(self, stack_name):
+ return self.client.get_template(StackName=stack_name)
+
+ def get_template(self, stack_name):
+ try:
+ response = self.get_template_with_backoff(stack_name)
+ return response.get('TemplateBody')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
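+    # e.g. to_dict([{'OutputKey': 'Env', 'OutputValue': 'dev'}], 'OutputKey', 'OutputValue')
+    # returns {'Env': 'dev'} (illustrative values)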
+ if items:
+ return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
+ else:
+ return dict()
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ stack_change_sets=dict(required=False, default=False, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ service_mgr = CloudFormationServiceManager(module)
+
+ result = {'cloudformation': {}}
+
+ for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
+ facts = {'stack_description': stack_description}
+ stack_name = stack_description.get('StackName')
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
+ 'ParameterKey', 'ParameterValue')
+ facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
+ 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+ if all_facts or module.params.get('stack_change_sets'):
+ facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
+
+ result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+ 'stack_parameters',
+ 'stack_policy',
+ 'stack_resources',
+ 'stack_tags',
+ 'stack_template'))
+ module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
new file mode 100644
index 00000000..8ad1cd8b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
@@ -0,0 +1,641 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail
+version_added: 5.0.0
+short_description: Manage CloudTrail creation, deletion, and updates
+description:
+ - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Ansible Core Team
+ - Ted Timmons (@tedder)
+ - Daniel Shepherd (@shepdelacreme)
+options:
+ state:
+ description:
+ - Add or remove CloudTrail configuration.
+ - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
+      - I(state=enabled) is equivalent to I(state=present).
+      - I(state=disabled) is equivalent to I(state=absent).
+ type: str
+ choices: ['present', 'absent', 'enabled', 'disabled']
+ default: present
+ name:
+ description:
+ - Name for the CloudTrail.
+ - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
+ type: str
+ default: default
+ enable_logging:
+ description:
+      - Start or stop the CloudTrail logging. If stopped, the trail will be paused and will not record events or deliver log files.
+ default: true
+ type: bool
+ s3_bucket_name:
+ description:
+ - An existing S3 bucket where CloudTrail will deliver log files.
+ - This bucket should exist and have the proper policy.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
+ - Required when I(state=present).
+ type: str
+ s3_key_prefix:
+ description:
+ - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
+ type: str
+ is_multi_region_trail:
+ description:
+ - Specify whether the trail belongs only to one region or exists in all regions.
+ default: false
+ type: bool
+ enable_log_file_validation:
+ description:
+ - Specifies whether log file integrity validation is enabled.
+      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
+ type: bool
+ aliases: [ "log_file_validation_enabled" ]
+ include_global_events:
+ description:
+ - Record API calls from global services such as IAM and STS.
+ default: true
+ type: bool
+ aliases: [ "include_global_service_events" ]
+ sns_topic_name:
+ description:
+ - SNS Topic name to send notifications to when a log file is delivered.
+ type: str
+ cloudwatch_logs_role_arn:
+ description:
+ - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_log_group_arn) is specified.
+ type: str
+ cloudwatch_logs_log_group_arn:
+ description:
+ - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_role_arn) is specified.
+ type: str
+ kms_key_id:
+ description:
+ - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
+ - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+ - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
+ type: str
+notes:
+  - The I(purge_tags) option was added in release 4.0.0.
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: create single region cloudtrail
+ amazon.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ s3_key_prefix: cloudtrail
+ region: us-east-1
+
+- name: create multi-region trail with validation and tags
+ amazon.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ region: us-east-1
+ is_multi_region_trail: true
+ enable_log_file_validation: true
+ cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
+ cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
+ kms_key_id: "alias/MyAliasName"
+ tags:
+ environment: dev
+ Name: default
+
+- name: show another valid kms_key_id
+ amazon.aws.cloudtrail:
+ state: present
+ name: default
+ s3_bucket_name: mylogbucket
+ kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
+ # simply "12345678-1234-1234-1234-123456789012" would be valid too.
+
+- name: pause logging the trail we just created
+ amazon.aws.cloudtrail:
+ state: present
+ name: default
+ enable_logging: false
+ s3_bucket_name: mylogbucket
+ region: us-east-1
+ is_multi_region_trail: true
+ enable_log_file_validation: true
+ tags:
+ environment: dev
+ Name: default
+
+- name: delete a trail
+ amazon.aws.cloudtrail:
+ state: absent
+ name: default
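+
+# state=enabled and state=disabled are preserved for backwards compatibility
+# and behave like present and absent respectively
+- name: delete a trail using the legacy state value
+  amazon.aws.cloudtrail:
+    state: disabled
+    name: default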
+'''
+
+RETURN = '''
+exists:
+ description: whether the resource exists
+ returned: always
+ type: bool
+ sample: true
+trail:
+ description: CloudTrail resource details
+ returned: always
+ type: complex
+ sample: hash/dictionary of values
+ contains:
+ trail_arn:
+ description: Full ARN of the CloudTrail resource
+ returned: success
+ type: str
+ sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
+ name:
+ description: Name of the CloudTrail resource
+ returned: success
+ type: str
+ sample: default
+ is_logging:
+ description: Whether logging is turned on or paused for the Trail
+ returned: success
+ type: bool
+ sample: True
+ s3_bucket_name:
+ description: S3 bucket name where log files are delivered
+ returned: success
+ type: str
+ sample: myBucket
+ s3_key_prefix:
+ description: Key prefix in bucket where log files are delivered (if any)
+ returned: success when present
+ type: str
+ sample: myKeyPrefix
+ log_file_validation_enabled:
+ description: Whether log file validation is enabled on the trail
+ returned: success
+ type: bool
+ sample: true
+ include_global_service_events:
+ description: Whether global services (IAM, STS) are logged with this trail
+ returned: success
+ type: bool
+ sample: true
+ is_multi_region_trail:
+ description: Whether the trail applies to all regions or just one
+ returned: success
+ type: bool
+ sample: true
+ has_custom_event_selectors:
+ description: Whether any custom event selectors are used for this trail.
+ returned: success
+ type: bool
+ sample: False
+ home_region:
+ description: The home region where the trail was originally created and must be edited.
+ returned: success
+ type: str
+ sample: us-east-1
+ sns_topic_name:
+ description: The SNS topic name where log delivery notifications are sent.
+ returned: success when present
+ type: str
+ sample: myTopic
+ sns_topic_arn:
+ description: Full ARN of the SNS topic where log delivery notifications are sent.
+ returned: success when present
+ type: str
+ sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
+ cloud_watch_logs_log_group_arn:
+ description: Full ARN of the CloudWatch Logs log group where events are delivered.
+ returned: success when present
+ type: str
+ sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
+ cloud_watch_logs_role_arn:
+ description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
+ returned: success when present
+ type: str
+ sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
+ kms_key_id:
+ description: Full ARN of the KMS Key used to encrypt log files.
+ returned: success when present
+ type: str
+ sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
+ tags:
+ description: hash/dictionary of tags applied to this resource
+ returned: success
+ type: dict
+ sample: {'environment': 'dev', 'Name': 'default'}
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+def get_kms_key_aliases(module, client, keyId):
+ """
+ get list of key aliases
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object for kms
+ keyId : keyId to get aliases for
+ """
+ try:
+ key_resp = client.list_aliases(KeyId=keyId)
+ except (BotoCoreError, ClientError):
+ # Don't fail here, just return [] to maintain backwards compat
+ # in case user doesn't have kms:ListAliases permissions
+ return []
+
+ return key_resp['Aliases']
+
+
+def create_trail(module, client, ct_params):
+ """
+ Creates a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ ct_params : The parameters for the Trail to create
+ """
+ resp = {}
+ try:
+ resp = client.create_trail(**ct_params)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to create Trail")
+
+ return resp
+
+
+def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True):
+ """
+ Creates, updates, removes tags on a CloudTrail resource
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+    tags : Dict of the tags desired on the resource
+    trail_arn : The ARN of the CloudTrail to operate on
+    curr_tags : Dict of the current tags on resource, if any
+    purge_tags : true/false to determine whether tags missing from the tags dict should be removed
+ """
+
+ if tags is None:
+ return False
+
+ curr_tags = curr_tags or {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags)
+ if not tags_to_add and not tags_to_remove:
+ return False
+
+ if module.check_mode:
+ return True
+
+ if tags_to_remove:
+ remove = {k: curr_tags[k] for k in tags_to_remove}
+ tags_to_remove = ansible_dict_to_boto3_tag_list(remove)
+ try:
+ client.remove_tags(ResourceId=trail_arn, TagsList=tags_to_remove)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to remove tags from Trail")
+
+ if tags_to_add:
+ tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add)
+ try:
+ client.add_tags(ResourceId=trail_arn, TagsList=tags_to_add)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to add tags to Trail")
+
+ return True
+
+
+def get_tag_list(keys, tags):
+ """
+ Returns a list of dicts with tags to act on
+ keys : set of keys to get the values for
+ tags : the dict of tags to turn into a list
+ """
+ tag_list = []
+ for k in keys:
+ tag_list.append({'Key': k, 'Value': tags[k]})
+
+ return tag_list
+
+
+def set_logging(module, client, name, action):
+ """
+ Starts or stops logging based on given state
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ name : The name or ARN of the CloudTrail to operate on
+ action : start or stop
+ """
+ if action == 'start':
+ try:
+ client.start_logging(Name=name)
+ return client.get_trail_status(Name=name)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to start logging")
+ elif action == 'stop':
+ try:
+ client.stop_logging(Name=name)
+ return client.get_trail_status(Name=name)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to stop logging")
+ else:
+ module.fail_json(msg="Unsupported logging action")
+
+
+def get_trail_facts(module, client, name):
+ """
+ Describes existing trail in an account
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ name : Name of the trail
+ """
+ # get Trail info
+ try:
+ trail_resp = client.describe_trails(trailNameList=[name])
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to describe Trail")
+
+ # Now check to see if our trail exists and get status and tags
+ if len(trail_resp['trailList']):
+ trail = trail_resp['trailList'][0]
+ try:
+ status_resp = client.get_trail_status(Name=trail['Name'])
+ tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to describe Trail")
+
+ trail['IsLogging'] = status_resp['IsLogging']
+ trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
+ # Check for non-existent values and populate with None
+ optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
+ for v in optional_vals - set(trail.keys()):
+ trail[v] = None
+ return trail
+
+ else:
+        # trail doesn't exist; return None
+ return None
+
+
+def delete_trail(module, client, trail_arn):
+ """
+ Delete a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ trail_arn : Full CloudTrail ARN
+ """
+ try:
+ client.delete_trail(Name=trail_arn)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to delete Trail")
+
+
+def update_trail(module, client, ct_params):
+ """
+    Updates a CloudTrail
+
+ module : AnsibleAWSModule object
+ client : boto3 client connection object
+ ct_params : The parameters for the Trail to update
+ """
+ try:
+ client.update_trail(**ct_params)
+ except (BotoCoreError, ClientError) as err:
+ module.fail_json_aws(err, msg="Failed to update Trail")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ name=dict(default='default'),
+ enable_logging=dict(default=True, type='bool'),
+ s3_bucket_name=dict(),
+ s3_key_prefix=dict(no_log=False),
+ sns_topic_name=dict(),
+ is_multi_region_trail=dict(default=False, type='bool'),
+ enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
+ include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
+ cloudwatch_logs_role_arn=dict(),
+ cloudwatch_logs_log_group_arn=dict(),
+ kms_key_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
+ required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
+
+ # collect parameters
+ if module.params['state'] in ('present', 'enabled'):
+ state = 'present'
+ elif module.params['state'] in ('absent', 'disabled'):
+ state = 'absent'
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+ enable_logging = module.params['enable_logging']
+ ct_params = dict(
+ Name=module.params['name'],
+ S3BucketName=module.params['s3_bucket_name'],
+ IncludeGlobalServiceEvents=module.params['include_global_events'],
+ IsMultiRegionTrail=module.params['is_multi_region_trail'],
+ )
+
+ if module.params['s3_key_prefix']:
+ ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
+
+ if module.params['sns_topic_name']:
+ ct_params['SnsTopicName'] = module.params['sns_topic_name']
+
+ if module.params['cloudwatch_logs_role_arn']:
+ ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
+
+ if module.params['cloudwatch_logs_log_group_arn']:
+ ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
+
+ if module.params['enable_log_file_validation'] is not None:
+ ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
+
+ if module.params['kms_key_id']:
+ ct_params['KmsKeyId'] = module.params['kms_key_id']
+
+ client = module.client('cloudtrail')
+ region = module.region
+
+ results = dict(
+ changed=False,
+ exists=False
+ )
+
+ # Get existing trail facts
+ trail = get_trail_facts(module, client, ct_params['Name'])
+
+ # If the trail exists set the result exists variable
+ if trail is not None:
+ results['exists'] = True
+ initial_kms_key_id = trail.get('KmsKeyId')
+
+ if state == 'absent' and results['exists']:
+ # If Trail exists go ahead and delete
+ results['changed'] = True
+ results['exists'] = False
+ results['trail'] = dict()
+ if not module.check_mode:
+ delete_trail(module, client, trail['TrailARN'])
+
+ elif state == 'present' and results['exists']:
+ # If Trail exists see if we need to update it
+ do_update = False
+ for key in ct_params:
+ tkey = str(key)
+ # boto3 has inconsistent parameter naming so we handle it here
+ if key == 'EnableLogFileValidation':
+ tkey = 'LogFileValidationEnabled'
+ # We need to make an empty string equal None
+ if ct_params.get(key) == '':
+ val = None
+ else:
+ val = ct_params.get(key)
+ if val != trail.get(tkey):
+ do_update = True
+ if tkey != 'KmsKeyId':
+                    # We'll check whether KmsKeyId causes changes later, since the
+                    # user could've provided a key alias, alias ARN, or key ID,
+                    # while trail['KmsKeyId'] is always a key ARN
+ results['changed'] = True
+ # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
+ if module.check_mode:
+ trail.update({tkey: ct_params.get(key)})
+
+ if not module.check_mode and do_update:
+ update_trail(module, client, ct_params)
+ trail = get_trail_facts(module, client, ct_params['Name'])
+
+ # Determine if KmsKeyId changed
+ if not module.check_mode:
+ if initial_kms_key_id != trail.get('KmsKeyId'):
+ results['changed'] = True
+ else:
+ new_key = ct_params.get('KmsKeyId')
+ if initial_kms_key_id != new_key:
+ # Assume changed for a moment
+ results['changed'] = True
+
+ # However, new_key could be a key id, alias arn, or alias name
+ # that maps back to the key arn in initial_kms_key_id. So check
+ # all aliases for a match.
+ initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
+ for a in initial_aliases:
+ if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key:
+ results['changed'] = False
+
+ # Check if we need to start/stop logging
+ if enable_logging and not trail['IsLogging']:
+ results['changed'] = True
+ trail['IsLogging'] = True
+ if not module.check_mode:
+ set_logging(module, client, name=ct_params['Name'], action='start')
+ if not enable_logging and trail['IsLogging']:
+ results['changed'] = True
+ trail['IsLogging'] = False
+ if not module.check_mode:
+ set_logging(module, client, name=ct_params['Name'], action='stop')
+
+ # Check if we need to update tags on resource
+ tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'],
+ purge_tags=purge_tags)
+ if tags_changed:
+ updated_tags = dict()
+ if not purge_tags:
+ updated_tags = trail['tags']
+ updated_tags.update(tags)
+ results['changed'] = True
+ trail['tags'] = updated_tags
+
+ # Populate trail facts in output
+ results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])
+
+ elif state == 'present' and not results['exists']:
+        # Trail doesn't exist, so go ahead and create it
+ results['changed'] = True
+ results['exists'] = True
+ if not module.check_mode:
+ if tags:
+ ct_params['TagsList'] = ansible_dict_to_boto3_tag_list(tags)
+ # If we aren't in check_mode then actually create it
+ created_trail = create_trail(module, client, ct_params)
+ # Get the trail status
+ try:
+ status_resp = client.get_trail_status(Name=created_trail['Name'])
+ except (BotoCoreError, ClientError) as err:
+                module.fail_json_aws(err, msg="Failed to fetch Trail status")
+ # Set the logging state for the trail to desired value
+ if enable_logging and not status_resp['IsLogging']:
+ set_logging(module, client, name=ct_params['Name'], action='start')
+ if not enable_logging and status_resp['IsLogging']:
+ set_logging(module, client, name=ct_params['Name'], action='stop')
+ # Get facts for newly created Trail
+ trail = get_trail_facts(module, client, ct_params['Name'])
+
+ # If we are in check mode create a fake return structure for the newly minted trail
+ if module.check_mode:
+ acct_id = '123456789012'
+ try:
+ sts_client = module.client('sts')
+ acct_id = sts_client.get_caller_identity()['Account']
+ except (BotoCoreError, ClientError):
+ pass
+ trail = dict()
+ trail.update(ct_params)
+            if 'EnableLogFileValidation' not in ct_params:
+                ct_params['EnableLogFileValidation'] = False
+            # describe_trails reports this field as LogFileValidationEnabled
+            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
+            trail.pop('EnableLogFileValidation', None)
+ fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
+ trail['HasCustomEventSelectors'] = False
+ trail['HomeRegion'] = region
+ trail['TrailARN'] = fake_arn
+ trail['IsLogging'] = enable_logging
+ trail['tags'] = tags
+ # Populate trail facts in output
+ results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py
new file mode 100644
index 00000000..a696ba71
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail_info
+version_added: 5.0.0
+short_description: Gather information about trails in AWS CloudTrail
+description:
+ - Gather information about trails in AWS CloudTrail.
+author: "Gomathi Selvi Srinivasan (@GomathiselviS)"
+options:
+ trail_names:
+ type: list
+ elements: str
+ description:
+ - Specifies a list of trail names, trail ARNs, or both, of the trails to describe.
+ - If an empty list is specified, information for the trail in the current region is returned.
+ include_shadow_trails:
+ type: bool
+ default: true
+ description: Specifies whether to include shadow trails in the response.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all trails
+- amazon.aws.cloudtrail_info:
+
+# Gather information about a particular trail
+- amazon.aws.cloudtrail_info:
+ trail_names:
+ - arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
+
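+# Gather information about all trails, excluding shadow trails
+# (an illustrative use of the documented include_shadow_trails option)
+- amazon.aws.cloudtrail_info:
+    include_shadow_trails: false
+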
+'''
+
+RETURN = '''
+trail_list:
+  description: List of trail objects. Each element is a dict with all the information related to that trail.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ name:
+ description: Name of the trail.
+ type: str
+ sample: "MyTrail"
+ s3_bucket_name:
+ description: Name of the Amazon S3 bucket into which CloudTrail delivers the trail files.
+ type: str
+ sample: "aws-cloudtrail-logs-xxxx"
+ s3_key_prefix:
+ description: Amazon S3 key prefix that comes after the name of the bucket that is designated for log file delivery.
+ type: str
+ sample: "xxxx"
+ sns_topic_arn:
+ description: ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered.
+ type: str
+ sample: "arn:aws:sns:us-east-2:123456789012:MyTopic"
+ include_global_service_events:
+ description: If True, AWS API calls from AWS global services such as IAM are included.
+ type: bool
+ sample: true
+ is_multi_region_trail:
+ description: Specifies whether the trail exists only in one region or exists in all regions.
+ type: bool
+ sample: true
+ home_region:
+ description: The region in which the trail was created.
+ type: str
+ sample: "us-east-1"
+ trail_arn:
+ description: Specifies the ARN of the trail.
+ type: str
+ sample: "arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail"
+ log_file_validation_enabled:
+ description: Specifies whether log file validation is enabled.
+ type: bool
+ sample: true
+ cloud_watch_logs_log_group_arn:
+ description: Specifies an ARN, that represents the log group to which CloudTrail logs will be delivered.
+ type: str
+ sample: "arn:aws:sns:us-east-2:123456789012:Mylog"
+ cloud_watch_logs_role_arn:
+ description: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
+ type: str
+ sample: "arn:aws:sns:us-east-2:123456789012:Mylog"
+ kms_key_id:
+ description: Specifies the KMS key ID that encrypts the logs delivered by CloudTrail.
+ type: str
+ sample: "arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012"
+ has_custom_event_selectors:
+ description: Specifies if the trail has custom event selectors.
+ type: bool
+ sample: true
+ has_insight_selectors:
+ description: Specifies whether a trail has insight types specified in an InsightSelector list.
+ type: bool
+ sample: true
+ is_organization_trail:
+ description: Specifies whether the trail is an organization trail.
+ type: bool
+ sample: true
+ is_logging:
+ description: Whether the CloudTrail is currently logging AWS API calls.
+ type: bool
+ sample: true
+ latest_delivery_error:
+ description: Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket.
+ type: str
+ latest_notification_error:
+ description: Displays any Amazon SNS error that CloudTrail encountered when attempting to send a notification.
+ type: str
+ latest_delivery_time:
+ description: Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket.
+ type: str
+ start_logging_time:
+ description: Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account.
+ type: str
+ stop_logging_time:
+ description: Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account.
+ type: str
+ latest_cloud_watch_logs_delivery_error:
+ description: Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs.
+ type: str
+ latest_cloud_watch_logs_delivery_time:
+ description: Displays the most recent date and time when CloudTrail delivered logs to CloudWatch Logs.
+ type: str
+ latest_digest_delivery_time:
+ description: Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket.
+ type: str
+ latest_digest_delivery_error:
+ description: Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket.
+ type: str
+ resource_id:
+ description: Specifies the ARN of the resource.
+ type: str
+ tags:
+ description: Any tags assigned to the cloudtrail.
+ type: dict
+ returned: always
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_trails(connection, module):
+    all_trails = []
+    try:
+        paginator = connection.get_paginator('list_trails')
+        # Iterate inside the try block so errors raised while fetching pages are also caught
+        for page in paginator.paginate():
+            all_trails.extend(list_cloud_trails(page))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to get the trails.")
+    return all_trails
+
+
+def list_cloud_trails(trail_dict):
+ return [x["TrailARN"] for x in trail_dict["Trails"]]
+
+
+def get_trail_detail(connection, module):
+ output = {}
+ trail_name_list = module.params.get("trail_names")
+ include_shadow_trails = module.params.get("include_shadow_trails")
+ if not trail_name_list:
+ trail_name_list = get_trails(connection, module)
+ try:
+ result = connection.describe_trails(trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get the trails.")
+    # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_cloud_trail = []
+ for cloud_trail in result['trailList']:
+ try:
+ status_dict = connection.get_trail_status(Name=cloud_trail["TrailARN"], aws_retry=True)
+ cloud_trail.update(status_dict)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get the trail status")
+ try:
+            tag_list = connection.list_tags(ResourceIdList=[cloud_trail["TrailARN"]], aws_retry=True)
+ for tag_dict in tag_list["ResourceTagList"]:
+ cloud_trail.update(tag_dict)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.warn("Failed to get the trail tags - {0}".format(e))
+ snaked_cloud_trail.append(camel_dict_to_snake_dict(cloud_trail))
+
+    # Turn the boto3 result into an ansible-friendly tag dictionary
+ for tr in snaked_cloud_trail:
+ if 'tags_list' in tr:
+ tr['tags'] = boto3_tag_list_to_ansible_dict(tr['tags_list'], 'key', 'value')
+            del tr['tags_list']
+        if 'response_metadata' in tr:
+            del tr['response_metadata']
+ output['trail_list'] = snaked_cloud_trail
+ return output
+
+
+def main():
+ argument_spec = dict(
+ trail_names=dict(type='list', elements='str', default=[]),
+ include_shadow_trails=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ connection = module.client('cloudtrail', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+ result = get_trail_detail(connection, module)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py
new file mode 100644
index 00000000..68e9694e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: cloudwatch_metric_alarm
+short_description: "Create/update or delete AWS CloudWatch 'metric alarms'"
+version_added: 5.0.0
+description:
+ - Can create or delete AWS CloudWatch metric alarms.
+ - Metrics you wish to alarm on must already exist.
+ - Prior to release 5.0.0 this module was called C(community.aws.ec2_metric_alarm).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Zacharie Eakin (@Zeekin)"
+options:
+ state:
+ description:
+ - Register or deregister the alarm.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Unique name for the alarm.
+ required: true
+ type: str
+ metric:
+ description:
+ - Name of the monitored metric (e.g. C(CPUUtilization)).
+ - Metric must already exist.
+ required: false
+ type: str
+ namespace:
+ description:
+ - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in CloudWatch.
+ required: false
+ type: str
+ statistic:
+ description:
+ - Operation applied to the metric.
+ - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value.
+ required: false
+ choices: ['SampleCount','Average','Sum','Minimum','Maximum']
+ type: str
+ comparison:
+ description:
+      - Determines how the threshold value is compared.
+ required: false
+ type: str
+ choices:
+ - 'GreaterThanOrEqualToThreshold'
+ - 'GreaterThanThreshold'
+ - 'LessThanThreshold'
+ - 'LessThanOrEqualToThreshold'
+ threshold:
+ description:
+ - Sets the min/max bound for triggering the alarm.
+ required: false
+ type: float
+ period:
+ description:
+ - The time (in seconds) between metric evaluations.
+ required: false
+ type: int
+ evaluation_periods:
+ description:
+ - The number of times in which the metric is evaluated before final calculation.
+ required: false
+ type: int
+ unit:
+ description:
+ - The threshold's unit of measurement.
+ required: false
+ type: str
+ choices:
+ - 'Seconds'
+ - 'Microseconds'
+ - 'Milliseconds'
+ - 'Bytes'
+ - 'Kilobytes'
+ - 'Megabytes'
+ - 'Gigabytes'
+ - 'Terabytes'
+ - 'Bits'
+ - 'Kilobits'
+ - 'Megabits'
+ - 'Gigabits'
+ - 'Terabits'
+ - 'Percent'
+ - 'Count'
+ - 'Bytes/Second'
+ - 'Kilobytes/Second'
+ - 'Megabytes/Second'
+ - 'Gigabytes/Second'
+ - 'Terabytes/Second'
+ - 'Bits/Second'
+ - 'Kilobits/Second'
+ - 'Megabits/Second'
+ - 'Gigabits/Second'
+ - 'Terabits/Second'
+ - 'Count/Second'
+ - 'None'
+ description:
+ description:
+ - A longer description of the alarm.
+ required: false
+ type: str
+ dimensions:
+ description:
+ - A dictionary describing which metric the alarm is applied to.
+ - 'For more information see the AWS documentation:'
+ - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension)
+ required: false
+ type: dict
+ alarm_actions:
+ description:
+      - A list of the names of the action(s) to take when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s).
+ required: false
+ type: list
+ elements: str
+ insufficient_data_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status.
+ required: false
+ type: list
+ elements: str
+ ok_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s).
+ required: false
+ type: list
+ elements: str
+ treat_missing_data:
+ description:
+ - Sets how the alarm handles missing data points.
+ required: false
+ type: str
+ choices:
+ - 'breaching'
+ - 'notBreaching'
+ - 'ignore'
+ - 'missing'
+ default: 'missing'
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+ - name: create alarm
+ amazon.aws.cloudwatch_metric_alarm:
+ state: present
+ region: ap-southeast-2
+ name: "cpu-low"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ statistic: Average
+ comparison: "LessThanOrEqualToThreshold"
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: "Percent"
+      description: "This will alarm when an instance's CPU usage average is lower than 5% for 15 minutes"
+ dimensions: {'InstanceId':'i-XXX'}
+ alarm_actions: ["action1","action2"]
+
+ - name: Create an alarm to recover a failed instance
+ amazon.aws.cloudwatch_metric_alarm:
+ state: present
+ region: us-west-1
+ name: "recover-instance"
+ metric: "StatusCheckFailed_System"
+ namespace: "AWS/EC2"
+ statistic: "Minimum"
+ comparison: "GreaterThanOrEqualToThreshold"
+ threshold: 1.0
+ period: 60
+ evaluation_periods: 2
+ unit: "Count"
+ description: "This will recover an instance when it fails"
+ dimensions: {"InstanceId":'i-XXX'}
+ alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
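+
+  # An illustrative cleanup example; removes the alarm created above using state=absent
+  - name: Delete the CPU alarm
+    amazon.aws.cloudwatch_metric_alarm:
+      state: absent
+      region: ap-southeast-2
+      name: "cpu-low"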
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def create_metric_alarm(connection, module, params):
+ alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])
+
+ if not isinstance(params['Dimensions'], list):
+ fixed_dimensions = []
+ for key, value in params['Dimensions'].items():
+ fixed_dimensions.append({'Name': key, 'Value': value})
+ params['Dimensions'] = fixed_dimensions
+
+ if not alarms['MetricAlarms']:
+ try:
+ if not module.check_mode:
+ connection.put_metric_alarm(**params)
+ changed = True
+ except ClientError as e:
+ module.fail_json_aws(e)
+
+ else:
+ changed = False
+ alarm = alarms['MetricAlarms'][0]
+
+ # Workaround for alarms created before TreatMissingData was introduced
+ if 'TreatMissingData' not in alarm.keys():
+ alarm['TreatMissingData'] = 'missing'
+
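+        # Strip the read-only state fields so the stored alarm can be compared
+        # key-by-key against the requested parameters below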
+ for key in ['ActionsEnabled', 'StateValue', 'StateReason',
+ 'StateReasonData', 'StateUpdatedTimestamp',
+ 'AlarmArn', 'AlarmConfigurationUpdatedTimestamp']:
+ alarm.pop(key, None)
+ if alarm != params:
+ changed = True
+ alarm = params
+
+ try:
+ if changed:
+ if not module.check_mode:
+ connection.put_metric_alarm(**alarm)
+ except ClientError as e:
+ module.fail_json_aws(e)
+
+ try:
+ alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])
+ except ClientError as e:
+ module.fail_json_aws(e)
+
+ result = {}
+ if alarms['MetricAlarms']:
+ result = alarms['MetricAlarms'][0]
+
+ module.exit_json(changed=changed,
+ name=result.get('AlarmName'),
+ actions_enabled=result.get('ActionsEnabled'),
+ alarm_actions=result.get('AlarmActions'),
+ alarm_arn=result.get('AlarmArn'),
+ comparison=result.get('ComparisonOperator'),
+ description=result.get('AlarmDescription'),
+ dimensions=result.get('Dimensions'),
+ evaluation_periods=result.get('EvaluationPeriods'),
+ insufficient_data_actions=result.get('InsufficientDataActions'),
+ last_updated=result.get('AlarmConfigurationUpdatedTimestamp'),
+ metric=result.get('MetricName'),
+ namespace=result.get('Namespace'),
+ ok_actions=result.get('OKActions'),
+ period=result.get('Period'),
+ state_reason=result.get('StateReason'),
+ state_value=result.get('StateValue'),
+ statistic=result.get('Statistic'),
+ threshold=result.get('Threshold'),
+ treat_missing_data=result.get('TreatMissingData'),
+ unit=result.get('Unit'))
+
+
+def delete_metric_alarm(connection, module, params):
+ alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])
+
+ if alarms['MetricAlarms']:
+ try:
+ if not module.check_mode:
+ connection.delete_alarms(AlarmNames=[params['AlarmName']])
+ module.exit_json(changed=True)
+        except ClientError as e:
+ module.fail_json_aws(e)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ metric=dict(type='str'),
+ namespace=dict(type='str'),
+ statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
+ comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold',
+ 'GreaterThanOrEqualToThreshold']),
+ threshold=dict(type='float'),
+ period=dict(type='int'),
+ unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
+ 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
+ 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
+ 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
+ 'Terabits/Second', 'Count/Second', 'None']),
+ evaluation_periods=dict(type='int'),
+ description=dict(type='str'),
+ dimensions=dict(type='dict', default={}),
+ alarm_actions=dict(type='list', default=[], elements='str'),
+ insufficient_data_actions=dict(type='list', default=[], elements='str'),
+ ok_actions=dict(type='list', default=[], elements='str'),
+ treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ state = module.params.get('state')
+
+ params = dict()
+ params['AlarmName'] = module.params.get('name')
+ params['MetricName'] = module.params.get('metric')
+ params['Namespace'] = module.params.get('namespace')
+ params['Statistic'] = module.params.get('statistic')
+ params['ComparisonOperator'] = module.params.get('comparison')
+ params['Threshold'] = module.params.get('threshold')
+ params['Period'] = module.params.get('period')
+ params['EvaluationPeriods'] = module.params.get('evaluation_periods')
+ if module.params.get('unit'):
+ params['Unit'] = module.params.get('unit')
+ params['AlarmDescription'] = module.params.get('description')
+ params['Dimensions'] = module.params.get('dimensions')
+ params['AlarmActions'] = module.params.get('alarm_actions', [])
+ params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', [])
+ params['OKActions'] = module.params.get('ok_actions', [])
+ params['TreatMissingData'] = module.params.get('treat_missing_data')
+
+ connection = module.client('cloudwatch')
+
+ if state == 'present':
+ create_metric_alarm(connection, module, params)
+ elif state == 'absent':
+ delete_metric_alarm(connection, module, params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py
new file mode 100644
index 00000000..24678b05
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatch_metric_alarm_info
+version_added: 5.0.0
+short_description: Gather information about the alarms for the specified metric
+description:
+ - Retrieves the alarms for the specified metric.
+author:
+ - Mandar Vijay Kulkarni (@mandar242)
+options:
+ alarm_names:
+ description:
+      - The name(s) of the alarm(s) to retrieve information about.
+ required: false
+ type: list
+ elements: str
+ alarm_name_prefix:
+ description:
+ - An alarm name prefix to retrieve information about alarms that have names that start with this prefix.
+ - Can not be used with I(alarm_names).
+ required: false
+ type: str
+ alarm_type:
+ description:
+ - Specify this to return metric alarms or composite alarms.
+      - The module defaults to returning metric alarms but can return composite alarms if I(alarm_type=CompositeAlarm).
+ required: false
+ type: str
+ default: MetricAlarm
+ choices: ['CompositeAlarm', 'MetricAlarm']
+ children_of_alarm_name:
+ description:
+      - If specified, returns information about the "children" alarms of the specified alarm name.
+ required: false
+ type: str
+ parents_of_alarm_name:
+ description:
+      - If specified, returns information about the "parent" alarms of the specified alarm name.
+ required: false
+ type: str
+ state_value:
+ description:
+      - If specified, returns information only about alarms that are currently in the specified state.
+ required: false
+ type: str
+ choices: ['OK', 'ALARM', 'INSUFFICIENT_DATA']
+ action_prefix:
+ description:
+ - This parameter can be used to filter the results of the operation to only those alarms that use a certain alarm action.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: describe the metric alarm based on alarm names
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - my-test-alarm-1
+ - my-test-alarm-2
+
+- name: describe the metric alarm based on alarm names and state value
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - my-test-alarm-1
+ - my-test-alarm-2
+ state_value: OK
+
+- name: describe the metric alarm based on alarm name prefix
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_name_prefix: my-test-
+
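+- name: describe composite alarms that use a given action (illustrative values)
+  amazon.aws.cloudwatch_metric_alarm_info:
+    alarm_type: CompositeAlarm
+    action_prefix: "arn:aws:sns:us-east-2:123456789012:"
+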
+'''
+
+RETURN = '''
+metric_alarms:
+ description: The gathered information about specified metric alarms.
+ returned: when success
+ type: list
+ elements: dict
+ contains:
+ alarm_name:
+ description: Unique name for the alarm.
+ returned: always
+ type: str
+ alarm_arn:
+ description: The Amazon Resource Name (ARN) of the alarm.
+ returned: always
+ type: str
+ alarm_description:
+ description: The description of the alarm.
+ returned: always
+ type: str
+ alarm_configuration_updated_timestamp:
+ description: The time stamp of the last update to the alarm configuration.
+ returned: always
+ type: str
+ actions_enabled:
+ description: Indicates whether actions should be executed during any changes to the alarm state.
+ returned: always
+ type: bool
+ ok_actions:
+ description: The actions to execute when this alarm transitions to an OK state from any other state.
+ returned: always
+ type: list
+ elements: str
+ alarm_actions:
+ description: The actions to execute when this alarm transitions to an ALARM state from any other state.
+ returned: always
+ type: list
+ elements: str
+ insufficient_data_actions:
+ description: The actions to execute when this alarm transitions to an INSUFFICIENT_DATA state from any other state.
+ returned: always
+ type: list
+ elements: str
+ state_value:
+ description: The state value for the alarm.
+ returned: always
+ type: str
+ state_reason:
+ description: An explanation for the alarm state, in text format.
+ returned: always
+ type: str
+ state_reason_data:
+ description: An explanation for the alarm state, in JSON format.
+ returned: always
+ type: str
+ state_updated_timestamp:
+ description: The time stamp of the last update to the alarm state.
+ returned: always
+ type: str
+ metric_name:
+ description: Name of the monitored metric (e.g. C(CPUUtilization)).
+ returned: always
+ type: str
+ namespace:
+ description:
+ - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.).
+ - Determines the category it will appear under in CloudWatch.
+ returned: always
+ type: str
+ statistic:
+ description: The statistic for the metric associated with the alarm, other than percentile.
+ returned: always
+ type: str
+ extended_statistic:
+ description: The percentile statistic for the metric associated with the alarm.
+ returned: always
+ type: str
+ dimensions:
+ description: The dimensions for the metric.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: The name of the dimension.
+ returned: always
+ type: str
+ value:
+ description: The value of the dimension.
+ returned: always
+ type: str
+ period:
+ description:
+ - The length, in seconds, used each time the metric specified in MetricName is evaluated.
+ - Valid values are 10, 30, and any multiple of 60.
+ returned: always
+ type: int
+ unit:
+      description: Unit used when storing the metric.
+ returned: always
+ type: str
+    evaluation_periods:
+ description: The number of periods over which data is compared to the specified threshold.
+ returned: always
+ type: int
+ datapoints_to_alarm:
+ description: The number of data points that must be breaching to trigger the alarm.
+ returned: always
+ type: int
+ threshold:
+ description: The value to compare with the specified statistic.
+ returned: always
+ type: float
+ comparison_operator:
+ description: The arithmetic operation to use when comparing the specified statistic and threshold.
+ returned: always
+ type: str
+ treat_missing_data:
+      description: Sets how the alarm handles missing data points.
+ returned: always
+ type: str
+ evaluate_low_sample_count_percentile:
+ description:
+ - Used only for alarms based on percentiles.
+ - If I(ignore), the alarm state does not change during periods with too few data points to be statistically significant.
+ - If I(evaluate) or this parameter is not used, the alarm is always evaluated and possibly changes state.
+ returned: always
+ type: str
+ metrics:
+ description: An array of MetricDataQuery structures, used in an alarm based on a metric math expression.
+ returned: always
+ type: list
+ elements: dict
+ threshold_metric_id:
+ description: This is the ID of the ANOMALY_DETECTION_BAND function used as the threshold for the alarm.
+ returned: always
+ type: str
+
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_alarms(connection, **params):
+ paginator = connection.get_paginator('describe_alarms')
+ return paginator.paginate(**params).build_full_result()
+
+
+def describe_metric_alarms_info(connection, module):
+
+ params = build_params(module)
+
+ alarm_type_to_return = module.params.get('alarm_type')
+
+ try:
+ describe_metric_alarms_info_response = _describe_alarms(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe cloudwatch metric alarm')
+
+ result = []
+
+ if alarm_type_to_return == 'CompositeAlarm':
+ for response_list_item in describe_metric_alarms_info_response['CompositeAlarms']:
+ result.append(camel_dict_to_snake_dict(response_list_item))
+ module.exit_json(composite_alarms=result)
+
+ for response_list_item in describe_metric_alarms_info_response['MetricAlarms']:
+ result.append(camel_dict_to_snake_dict(response_list_item))
+
+ module.exit_json(metric_alarms=result)
+
+
+def build_params(module):
+
+ params = {}
+
+ if module.params.get('alarm_names'):
+ params['AlarmNames'] = module.params.get('alarm_names')
+
+ if module.params.get('alarm_name_prefix'):
+ params['AlarmNamePrefix'] = module.params.get('alarm_name_prefix')
+
+ if module.params.get('children_of_alarm_name'):
+ params['ChildrenOfAlarmName'] = module.params.get('children_of_alarm_name')
+
+ if module.params.get('parents_of_alarm_name'):
+ params['ParentsOfAlarmName'] = module.params.get('parents_of_alarm_name')
+
+ if module.params.get('state_value'):
+ params['StateValue'] = module.params.get('state_value')
+
+ if module.params.get('action_prefix'):
+ params['ActionPrefix'] = module.params.get('action_prefix')
+
+ return params
+
+
+def main():
+
+ argument_spec = dict(
+ alarm_names=dict(type='list', elements='str', required=False),
+ alarm_name_prefix=dict(type='str', required=False),
+ alarm_type=dict(type='str', choices=['CompositeAlarm', 'MetricAlarm'], default='MetricAlarm', required=False),
+ children_of_alarm_name=dict(type='str', required=False),
+ parents_of_alarm_name=dict(type='str', required=False),
+ state_value=dict(type='str', choices=['OK', 'ALARM', 'INSUFFICIENT_DATA'], required=False),
+ action_prefix=dict(type='str', required=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['alarm_names', 'alarm_name_prefix']],
+ supports_check_mode=True
+ )
+
+ try:
+ connection = module.client('cloudwatch', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ describe_metric_alarms_info(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py
new file mode 100644
index 00000000..e0c3e39a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py
@@ -0,0 +1,516 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudwatchevent_rule
+version_added: 5.0.0
+short_description: Manage CloudWatch Event rules and targets
+description:
+ - This module creates and manages CloudWatch event rules and targets.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+author:
+ - "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
+notes:
+ - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
+ rule can have both an I(event_pattern) and a I(schedule_expression), in which
+ case the rule will trigger on matching events as well as on a schedule.
+  - When specifying targets, I(input), I(input_path), I(input_paths_map) and I(input_template)
+    are mutually exclusive and optional parameters.
+options:
+ name:
+ description:
+ - The name of the rule you are creating, updating or deleting. No spaces
+ or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
+ required: true
+ type: str
+ schedule_expression:
+ description:
+ - A cron or rate expression that defines the schedule the rule will
+ trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
+ required: false
+ type: str
+ event_pattern:
+ description:
+ - A string pattern that is used to match against incoming events to determine if the rule
+ should be triggered.
+ required: false
+ type: json
+ state:
+ description:
+ - Whether the rule is present (and enabled), disabled, or absent.
+ choices: ["present", "disabled", "absent"]
+ default: present
+ required: false
+ type: str
+ description:
+ description:
+ - A description of the rule.
+ required: false
+ type: str
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role associated with the rule.
+ required: false
+ type: str
+ targets:
+ type: list
+ elements: dict
+ description:
+ - A list of targets to add to or update for the rule.
+ suboptions:
+ id:
+ type: str
+ required: true
+ description: The unique target assignment ID.
+ arn:
+ type: str
+ required: true
+ description: The ARN associated with the target.
+ role_arn:
+ type: str
+ description: The ARN of the IAM role to be used for this target when the rule is triggered.
+ input:
+ type: json
+ description:
+ - A JSON object that will override the event data passed to the target.
+ - If neither I(input) nor I(input_path) nor I(input_transformer)
+ is specified, then the entire event is passed to the target in JSON form.
+ input_path:
+ type: str
+ description:
+ - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
+ passed to the target.
+ - If neither I(input) nor I(input_path) nor I(input_transformer)
+ is specified, then the entire event is passed to the target in JSON form.
+ input_transformer:
+ type: dict
+ description:
+ - Settings to support providing custom input to a target based on certain event data.
+ version_added: 4.1.0
+ version_added_collection: community.aws
+ suboptions:
+ input_paths_map:
+ type: dict
+ description:
+ - A dict that specifies the transformation of the event data to
+ custom input parameters.
+ input_template:
+ type: json
+ description:
+          - A string that templates the values extracted from the event data via I(input_paths_map).
+            It is used to produce the output you want to be sent to the target.
+ ecs_parameters:
+ type: dict
+ description:
+ - Contains the ECS task definition and task count to be used, if the event target is an ECS task.
+ suboptions:
+ task_definition_arn:
+ type: str
+ description: The full ARN of the task definition.
+ required: true
+ task_count:
+ type: int
+          description: The number of tasks to create based on I(task_definition_arn).
+ required: false
+'''
+
+EXAMPLES = r'''
+- amazon.aws.cloudwatchevent_rule:
+ name: MyCronTask
+ schedule_expression: "cron(0 20 * * ? *)"
+ description: Run my scheduled task
+ targets:
+ - id: MyTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+
+- amazon.aws.cloudwatchevent_rule:
+ name: MyDisabledCronTask
+ schedule_expression: "rate(5 minutes)"
+ description: Run my disabled scheduled task
+ state: disabled
+ targets:
+ - id: MyOtherTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+ input: '{"foo": "bar"}'
+
+- amazon.aws.cloudwatchevent_rule:
+ name: MyInstanceLaunchEvent
+ description: "Rule for EC2 instance launch"
+ state: present
+ event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}'
+ targets:
+ - id: MyTargetSnsTopic
+ arn: arn:aws:sns:us-east-1:123456789012:MySNSTopic
+ input_transformer:
+ input_paths_map:
+ instance: "$.detail.instance-id"
+ state: "$.detail.state"
+ input_template: "<instance> is in state <state>"
+
+- amazon.aws.cloudwatchevent_rule:
+ name: MyCronTask
+ state: absent
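+
+# An illustrative example forwarding only part of the matched event to the
+# target via the documented input_path option; the ARN is a placeholder
+- amazon.aws.cloudwatchevent_rule:
+    name: MyInstanceStateEvent
+    state: present
+    event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"]}'
+    targets:
+      - id: MyTargetId
+        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+        input_path: "$.detail"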
+'''
+
+RETURN = r'''
+rule:
+ description: CloudWatch Event rule data.
+ returned: success
+ type: dict
+ sample:
+ arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
+ description: 'Run my scheduled task'
+ name: 'MyCronTask'
+ schedule_expression: 'cron(0 20 * * ? *)'
+ state: 'ENABLED'
+targets:
+ description: CloudWatch Event target(s) assigned to the rule.
+ returned: success
+ type: list
+ sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
+
+
+def _format_json(json_string):
+    # Ansible passes simple strings through unquoted; if the value isn't already valid JSON, quote it so it becomes a JSON string
+ try:
+ json.loads(json_string)
+ return json_string
+ except json.decoder.JSONDecodeError:
+ return str(json.dumps(json_string))
+
+
+class CloudWatchEventRule(object):
+ def __init__(self, module, name, client, schedule_expression=None,
+ event_pattern=None, description=None, role_arn=None):
+ self.name = name
+ self.client = client
+ self.changed = False
+ self.schedule_expression = schedule_expression
+ self.event_pattern = event_pattern
+ self.description = description
+ self.role_arn = role_arn
+ self.module = module
+
+ def describe(self):
+ """Returns the existing details of the rule in AWS"""
+ try:
+ rule_info = self.client.describe_rule(Name=self.name)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
+ return self._snakify(rule_info)
+
+ def put(self, enabled=True):
+ """Creates or updates the rule in AWS"""
+ request = {
+ 'Name': self.name,
+ 'State': "ENABLED" if enabled else "DISABLED",
+ }
+ if self.schedule_expression:
+ request['ScheduleExpression'] = self.schedule_expression
+ if self.event_pattern:
+ request['EventPattern'] = self.event_pattern
+ if self.description:
+ request['Description'] = self.description
+ if self.role_arn:
+ request['RoleArn'] = self.role_arn
+ try:
+ response = self.client.put_rule(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def delete(self):
+ """Deletes the rule in AWS"""
+ self.remove_all_targets()
+
+ try:
+ response = self.client.delete_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def enable(self):
+ """Enables the rule in AWS"""
+ try:
+ response = self.client.enable_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def disable(self):
+ """Disables the rule in AWS"""
+ try:
+ response = self.client.disable_rule(Name=self.name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def list_targets(self):
+ """Lists the existing targets for the rule in AWS"""
+ try:
+ targets = self.client.list_targets_by_rule(Rule=self.name)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return []
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
+ return self._snakify(targets)['targets']
+
+ def put_targets(self, targets):
+ """Creates or updates the provided targets on the rule in AWS"""
+ if not targets:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Targets': self._targets_request(targets),
+ }
+ try:
+ response = self.client.put_targets(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def remove_targets(self, target_ids):
+ """Removes the provided targets from the rule in AWS"""
+ if not target_ids:
+ return
+ request = {
+ 'Rule': self.name,
+ 'Ids': target_ids
+ }
+ try:
+ response = self.client.remove_targets(**request)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name)
+ self.changed = True
+ return response
+
+ def remove_all_targets(self):
+ """Removes all targets on rule"""
+ targets = self.list_targets()
+ return self.remove_targets([t['id'] for t in targets])
+
+ def _targets_request(self, targets):
+ """Formats each target for the request"""
+ targets_request = []
+ for target in targets:
+ target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True))
+ if target_request.get('Input', None):
+ target_request['Input'] = _format_json(target_request['Input'])
+ if target_request.get('InputTransformer', None):
+ if target_request.get('InputTransformer').get('InputTemplate', None):
+ target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate'])
+ if target_request.get('InputTransformer').get('InputPathsMap', None):
+ target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map']
+ targets_request.append(target_request)
+ return targets_request
+
+    def _snakify(self, data):
+        """Converts camel case keys to snake case"""
+        return camel_dict_to_snake_dict(data)
+
+
+class CloudWatchEventRuleManager(object):
+ RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
+
+ def __init__(self, rule, targets):
+ self.rule = rule
+ self.targets = targets
+
+ def ensure_present(self, enabled=True):
+ """Ensures the rule and targets are present and synced"""
+ rule_description = self.rule.describe()
+ if rule_description:
+ # Rule exists so update rule, targets and state
+ self._sync_rule(enabled)
+ self._sync_targets()
+ self._sync_state(enabled)
+ else:
+ # Rule does not exist, so create new rule and targets
+ self._create(enabled)
+
+ def ensure_disabled(self):
+ """Ensures the rule and targets are present, but disabled, and synced"""
+ self.ensure_present(enabled=False)
+
+ def ensure_absent(self):
+ """Ensures the rule and targets are absent"""
+ rule_description = self.rule.describe()
+ if not rule_description:
+ # Rule doesn't exist so don't need to delete
+ return
+ self.rule.delete()
+
+ def fetch_aws_state(self):
+ """Retrieves rule and target state from AWS"""
+ aws_state = {
+ 'rule': {},
+ 'targets': [],
+ 'changed': self.rule.changed
+ }
+ rule_description = self.rule.describe()
+ if not rule_description:
+ return aws_state
+
+ # Don't need to include response metadata noise in response
+ del rule_description['response_metadata']
+
+ aws_state['rule'] = rule_description
+ aws_state['targets'].extend(self.rule.list_targets())
+ return aws_state
+
+ def _sync_rule(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ if not self._rule_matches_aws():
+ self.rule.put(enabled)
+
+ def _sync_targets(self):
+ """Syncs local targets with AWS"""
+ # Identify and remove extraneous targets on AWS
+ target_ids_to_remove = self._remote_target_ids_to_remove()
+ if target_ids_to_remove:
+ self.rule.remove_targets(target_ids_to_remove)
+
+ # Identify targets that need to be added or updated on AWS
+ targets_to_put = self._targets_to_put()
+ if targets_to_put:
+ self.rule.put_targets(targets_to_put)
+
+ def _sync_state(self, enabled=True):
+ """Syncs local rule state with AWS"""
+ remote_state = self._remote_state()
+ if enabled and remote_state != 'ENABLED':
+ self.rule.enable()
+ elif not enabled and remote_state != 'DISABLED':
+ self.rule.disable()
+
+ def _create(self, enabled=True):
+ """Creates rule and targets on AWS"""
+ self.rule.put(enabled)
+ self.rule.put_targets(self.targets)
+
+ def _rule_matches_aws(self):
+ """Checks if the local rule data matches AWS"""
+ aws_rule_data = self.rule.describe()
+
+ # The rule matches AWS only if all rule data fields are equal
+ # to their corresponding local value defined in the task
+ return all(
+ getattr(self.rule, field) == aws_rule_data.get(field, None)
+ for field in self.RULE_FIELDS
+ )
+
+ def _targets_to_put(self):
+ """Returns a list of targets that need to be updated or added remotely"""
+ remote_targets = self.rule.list_targets()
+ return [t for t in self.targets if t not in remote_targets]
+
+ def _remote_target_ids_to_remove(self):
+ """Returns a list of targets that need to be removed remotely"""
+ target_ids = [t['id'] for t in self.targets]
+ remote_targets = self.rule.list_targets()
+ return [
+ rt['id'] for rt in remote_targets if rt['id'] not in target_ids
+ ]
+
+ def _remote_state(self):
+ """Returns the remote state from AWS"""
+ description = self.rule.describe()
+ if not description:
+ return
+ return description['state']
+
+
+def main():
+ target_args = dict(
+ type='list', elements='dict', default=[],
+ options=dict(
+ id=dict(type='str', required=True),
+ arn=dict(type='str', required=True),
+ role_arn=dict(type='str'),
+ input=dict(type='json'),
+ input_path=dict(type='str'),
+ input_transformer=dict(
+ type='dict',
+ options=dict(
+ input_paths_map=dict(type='dict'),
+ input_template=dict(type='json'),
+ ),
+ ),
+ ecs_parameters=dict(
+ type='dict',
+ options=dict(
+ task_definition_arn=dict(type='str', required=True),
+ task_count=dict(type='int'),
+ ),
+ ),
+ ),
+ )
+ argument_spec = dict(
+ name=dict(required=True),
+ schedule_expression=dict(),
+ event_pattern=dict(type='json'),
+ state=dict(choices=['present', 'disabled', 'absent'],
+ default='present'),
+ description=dict(),
+ role_arn=dict(),
+ targets=target_args,
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ rule_data = dict(
+ [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
+ )
+ targets = module.params.get('targets')
+ state = module.params.get('state')
+ client = module.client('events')
+
+ cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
+ cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+ if state == 'present':
+ cwe_rule_manager.ensure_present()
+ elif state == 'disabled':
+ cwe_rule_manager.ensure_disabled()
+ elif state == 'absent':
+ cwe_rule_manager.ensure_absent()
+ else:
+ module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+ module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py
new file mode 100644
index 00000000..ee6df826
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group
+version_added: 5.0.0
+short_description: Create or delete log_group in CloudWatchLogs
+description:
+ - Create or delete log_group in CloudWatchLogs.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
+ - Support for I(purge_tags) was added in release 4.0.0.
+author:
+ - Willian Ricardo (@willricardo) <willricardo@gmail.com>
+options:
+ state:
+ description:
+      - Whether the log group is present or absent.
+ choices: ["present", "absent"]
+ default: present
+ required: false
+ type: str
+ log_group_name:
+ description:
+ - The name of the log group.
+ required: true
+ type: str
+ kms_key_id:
+ description:
+ - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ required: false
+ type: str
+ retention:
+ description:
+ - The number of days to retain the log events in the specified log group.
+ - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
+ - Mutually exclusive with I(purge_retention_policy).
+ required: false
+ type: int
+ purge_retention_policy:
+ description:
+ - "Whether to purge the retention policy or not."
+ - "Mutually exclusive with I(retention) and I(overwrite)."
+ default: false
+ required: false
+ type: bool
+ overwrite:
+ description:
+ - Whether an existing log group should be overwritten on create.
+ - Mutually exclusive with I(purge_retention_policy).
+ default: false
+ required: false
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- amazon.aws.cloudwatchlogs_log_group:
+ log_group_name: test-log-group
+
+- amazon.aws.cloudwatchlogs_log_group:
+ state: present
+ log_group_name: test-log-group
+ tags: { "Name": "test-log-group", "Env" : "QA" }
+
+- amazon.aws.cloudwatchlogs_log_group:
+ state: present
+ log_group_name: test-log-group
+ tags: { "Name": "test-log-group", "Env" : "QA" }
+ kms_key_id: arn:aws:kms:region:account-id:key/key-id
+
+- amazon.aws.cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: test-log-group
+
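+# An illustrative example applying one of the documented valid retention values
+- amazon.aws.cloudwatchlogs_log_group:
+    state: present
+    log_group_name: test-log-group
+    retention: 90
+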
+'''
+
+RETURN = '''
+log_groups:
+ description: Return the list of complex objects representing log groups
+ returned: success
+ type: complex
+ version_added: 4.0.0
+ version_added_collection: community.aws
+ contains:
+ log_group_name:
+ description: The name of the log group.
+ returned: always
+ type: str
+ creation_time:
+ description: The creation time of the log group.
+ returned: always
+ type: int
+ retention_in_days:
+ description: The number of days to retain the log events in the specified log group.
+ returned: always
+ type: int
+ metric_filter_count:
+ description: The number of metric filters.
+ returned: always
+ type: int
+ arn:
+ description: The Amazon Resource Name (ARN) of the log group.
+ returned: always
+ type: str
+ stored_bytes:
+ description: The number of bytes stored.
+ returned: always
+ type: str
+ kms_key_id:
+ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ returned: always
+ type: str
+ tags:
+ description: A dictionary representing the tags on the log group.
+ returned: always
+ type: dict
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
+ request = {'logGroupName': log_group_name}
+ if kms_key_id:
+ request['kmsKeyId'] = kms_key_id
+ if tags:
+ request['tags'] = tags
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have created log group if not in check_mode.")
+
+ try:
+ client.create_log_group(**request)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create log group")
+
+ if retention:
+ input_retention_policy(client=client,
+ log_group_name=log_group_name,
+ retention=retention, module=module)
+
+ found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module)
+
+ if not found_log_group:
+ module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
+ return found_log_group
+
+
+def input_retention_policy(client, log_group_name, retention, module):
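+    # Note: an invalid retention value causes the module to delete the target
+    # log group before failing.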
+ try:
+        permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
+
+        if retention in permitted_values:
+            client.put_retention_policy(logGroupName=log_group_name,
+                                        retentionInDays=retention)
+ else:
+ delete_log_group(client=client, log_group_name=log_group_name, module=module)
+ module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name))
+
+
+def delete_retention_policy(client, log_group_name, module):
+ if module.check_mode:
+ return True
+
+ try:
+ client.delete_retention_policy(logGroupName=log_group_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name))
+
+
+def delete_log_group(client, log_group_name, module):
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have deleted log group if not in check_mode.")
+
+ try:
+ client.delete_log_group(logGroupName=log_group_name)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name))
+
+
+def describe_log_group(client, log_group_name, module):
+ try:
+ desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name))
+
+ matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name]
+
+ if not matching_logs:
+ return {}
+
+ found_log_group = matching_logs[0]
+
+ try:
+ tags = client.list_tags_log_group(logGroupName=log_group_name)
+ except is_boto3_error_code('AccessDeniedException'):
+ tags = {}
+ module.warn('Permission denied listing tags for log group {0}'.format(log_group_name))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name))
+
+ found_log_group['tags'] = tags.get('tags', {})
+ return found_log_group
+
+
+def format_result(found_log_group):
+ # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group
+ # Return both to avoid a breaking change.
+ log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags'])
+ return dict(log_groups=[log_group], **log_group)
+
+
+def ensure_tags(client, found_log_group, desired_tags, purge_tags, module):
+ if desired_tags is None:
+ return False
+
+ group_name = module.params.get('log_group_name')
+ current_tags = found_log_group.get('tags', {})
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags)
+
+ if not tags_to_add and not tags_to_remove:
+ return False
+ if module.check_mode:
+ return True
+
+ try:
+ if tags_to_remove:
+ client.untag_log_group(logGroupName=group_name, tags=tags_to_remove)
+ if tags_to_add:
+ client.tag_log_group(logGroupName=group_name, tags=tags_to_add)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to update tags')
+
+ return True
+
+
+def main():
+ argument_spec = dict(
+ log_group_name=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'],
+ default='present'),
+ kms_key_id=dict(required=False, type='str'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(required=False, type='bool', default=True),
+ retention=dict(required=False, type='int'),
+ purge_retention_policy=dict(required=False, type='bool', default=False),
+ overwrite=dict(required=False, type='bool', default=False),
+ )
+
+ mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']]
+ module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
+
+ try:
+ logs = module.client('logs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ changed = False
+
+ # Determine if the log group exists
+ found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
+
+ if state == 'present':
+ if found_log_group:
+ if module.params['overwrite'] is True:
+ changed = True
+ delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
+ found_log_group = create_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ kms_key_id=module.params['kms_key_id'],
+ tags=module.params['tags'],
+ retention=module.params['retention'],
+ module=module)
+ else:
+ changed |= ensure_tags(client=logs,
+ found_log_group=found_log_group,
+ desired_tags=module.params['tags'],
+ purge_tags=module.params['purge_tags'],
+ module=module)
+ if module.params['purge_retention_policy']:
+ if found_log_group.get('retentionInDays'):
+ changed = True
+ delete_retention_policy(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+ elif module.params['retention'] != found_log_group.get('retentionInDays'):
+ if module.params['retention'] is not None:
+ changed = True
+ input_retention_policy(client=logs,
+ log_group_name=module.params['log_group_name'],
+ retention=module.params['retention'],
+ module=module)
+ if changed:
+ found_log_group = describe_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+
+ elif not found_log_group:
+ changed = True
+ found_log_group = create_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ kms_key_id=module.params['kms_key_id'],
+ tags=module.params['tags'],
+ retention=module.params['retention'],
+ module=module)
+
+ result = format_result(found_log_group)
+ module.exit_json(changed=changed, **result)
+
+ elif state == 'absent':
+ if found_log_group:
+ changed = True
+ delete_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py
new file mode 100644
index 00000000..cb4c3808
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_info
+version_added: 5.0.0
+short_description: Get information about log_group in CloudWatchLogs
+description:
+ - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Willian Ricardo (@willricardo) <willricardo@gmail.com>
+options:
+ log_group_name:
+ description:
+ - The name or prefix of the log group to filter by.
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- amazon.aws.cloudwatchlogs_log_group_info:
+ log_group_name: test-log-group
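+
+# An illustrative follow-up (not part of the original example): list every log
+# group by omitting log_group_name and register the result.
+- name: List all log groups
+  amazon.aws.cloudwatchlogs_log_group_info:
+  register: log_groups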
+'''
+
+RETURN = '''
+log_groups:
+  description: The list of complex objects representing the log groups.
+ returned: success
+ type: complex
+ contains:
+ log_group_name:
+ description: The name of the log group.
+ returned: always
+ type: str
+ creation_time:
+ description: The creation time of the log group.
+ returned: always
+ type: int
+ retention_in_days:
+ description: The number of days to retain the log events in the specified log group.
+ returned: always
+ type: int
+ metric_filter_count:
+ description: The number of metric filters.
+ returned: always
+ type: int
+ arn:
+ description: The Amazon Resource Name (ARN) of the log group.
+ returned: always
+ type: str
+ stored_bytes:
+ description: The number of bytes stored.
+ returned: always
+ type: str
+ kms_key_id:
+ description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+ returned: always
+ type: str
+ tags:
+ description: A dictionary representing the tags on the log group.
+ returned: always
+ type: dict
+ version_added: 4.0.0
+ version_added_collection: community.aws
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def describe_log_group(client, log_group_name, module):
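+    # describe_log_groups filters on name prefix only, so paginate through the
+    # full result set; tags are fetched separately for each group below.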
+ params = {}
+ if log_group_name:
+ params['logGroupNamePrefix'] = log_group_name
+ try:
+ paginator = client.get_paginator('describe_log_groups')
+ desc_log_group = paginator.paginate(**params).build_full_result()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name))
+
+ for log_group in desc_log_group['logGroups']:
+ log_group_name = log_group['logGroupName']
+ try:
+ tags = client.list_tags_log_group(logGroupName=log_group_name)
+ except is_boto3_error_code('AccessDeniedException'):
+ tags = {}
+ module.warn('Permission denied listing tags for log group {0}'.format(log_group_name))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name))
+ log_group['tags'] = tags.get('tags', {})
+
+ return desc_log_group
+
+
+def main():
+ argument_spec = dict(
+ log_group_name=dict(),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ logs = module.client('logs')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ desc_log_group = describe_log_group(client=logs,
+ log_group_name=module.params['log_group_name'],
+ module=module)
+ final_log_group_snake = []
+
+ for log_group in desc_log_group['logGroups']:
+ final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags']))
+
+ desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
+ module.exit_json(**desc_log_group_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
new file mode 100644
index 00000000..82435f4c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_metric_filter
+version_added: 5.0.0
+author:
+ - "Markus Bergholz (@markuman)"
+short_description: Manage CloudWatch log group metric filter
+description:
+ - Create, modify and delete CloudWatch log group metric filter.
+  - CloudWatch log group metric filter can be used with M(community.aws.ec2_metric_alarm).
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Whether the rule is present or absent.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ log_group_name:
+ description:
+ - The name of the log group where the metric filter is applied on.
+ required: true
+ type: str
+ filter_name:
+ description:
+ - A name for the metric filter you create.
+ required: true
+ type: str
+ filter_pattern:
+ description:
+ - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present).
+ type: str
+ metric_transformation:
+ description:
+ - A collection of information that defines how metric data gets emitted. Required when I(state=present).
+ type: dict
+ suboptions:
+ metric_name:
+ description:
+          - The name of the CloudWatch metric.
+ type: str
+ metric_namespace:
+ description:
+          - The namespace of the CloudWatch metric.
+ type: str
+ metric_value:
+ description:
+          - The value to publish to the CloudWatch metric when a filter pattern matches a log event.
+ type: str
+ default_value:
+ description:
+ - The value to emit when a filter pattern does not match a log event.
+ type: float
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: set metric filter on log group /fluentd/testcase
+ amazon.aws.cloudwatchlogs_log_group_metric_filter:
+ log_group_name: /fluentd/testcase
+ filter_name: BoxFreeStorage
+ filter_pattern: '{($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: "$.value"
+
+- name: delete metric filter on log group /fluentd/testcase
+ amazon.aws.cloudwatchlogs_log_group_metric_filter:
+ log_group_name: /fluentd/testcase
+ filter_name: BoxFreeStorage
+ state: absent
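+
+# An illustrative variant (not in the original examples): default_value emits
+# a metric value when log events do not match the filter pattern.
+- name: set metric filter with a default value
+  amazon.aws.cloudwatchlogs_log_group_metric_filter:
+    log_group_name: /fluentd/testcase
+    filter_name: BoxFreeStorage
+    filter_pattern: '{($.value = *) && ($.hostname = "box")}'
+    state: present
+    metric_transformation:
+      metric_name: box_free_space
+      metric_namespace: fluentd_metrics
+      metric_value: "$.value"
+      default_value: 3.1415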
+'''
+
+RETURN = """
+metric_filters:
+  description: The metric transformations of the filter, from the original API response.
+ returned: success
+ type: list
+ sample: [
+ {
+ "default_value": 3.1415,
+ "metric_name": "box_free_space",
+ "metric_namespace": "made_with_ansible",
+ "metric_value": "$.value"
+ }
+ ]
+
+"""
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+def metricTransformationHandler(metricTransformations, originMetricTransformations=None):
+
+ if originMetricTransformations:
+ change = False
+        originMetricTransformations = camel_dict_to_snake_dict(originMetricTransformations)
+ for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]:
+ if metricTransformations.get(item) != originMetricTransformations.get(item):
+ change = True
+ else:
+ change = True
+
+ defaultValue = metricTransformations.get("default_value")
+    if isinstance(defaultValue, (int, float)):
+ retval = [
+ {
+ 'metricName': metricTransformations.get("metric_name"),
+ 'metricNamespace': metricTransformations.get("metric_namespace"),
+ 'metricValue': metricTransformations.get("metric_value"),
+ 'defaultValue': defaultValue
+ }
+ ]
+ else:
+ retval = [
+ {
+ 'metricName': metricTransformations.get("metric_name"),
+ 'metricNamespace': metricTransformations.get("metric_namespace"),
+ 'metricValue': metricTransformations.get("metric_value"),
+ }
+ ]
+
+ return retval, change
+
+
+def main():
+
+ arg_spec = dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ log_group_name=dict(type='str', required=True),
+ filter_name=dict(type='str', required=True),
+ filter_pattern=dict(type='str'),
+ metric_transformation=dict(type='dict', options=dict(
+ metric_name=dict(type='str'),
+ metric_namespace=dict(type='str'),
+ metric_value=dict(type='str'),
+ default_value=dict(type='float')
+ )),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])]
+ )
+
+ log_group_name = module.params.get("log_group_name")
+ filter_name = module.params.get("filter_name")
+ filter_pattern = module.params.get("filter_pattern")
+ metric_transformation = module.params.get("metric_transformation")
+ state = module.params.get("state")
+
+ cwl = module.client('logs')
+
+ # check if metric filter exists
+ response = cwl.describe_metric_filters(
+ logGroupName=log_group_name,
+ filterNamePrefix=filter_name
+ )
+
+ if len(response.get("metricFilters")) == 1:
+ originMetricTransformations = response.get(
+ "metricFilters")[0].get("metricTransformations")[0]
+ originFilterPattern = response.get("metricFilters")[
+ 0].get("filterPattern")
+ else:
+ originMetricTransformations = None
+ originFilterPattern = None
+ change = False
+ metricTransformation = None
+
+ if state == "absent" and originMetricTransformations:
+ if not module.check_mode:
+ response = cwl.delete_metric_filter(
+ logGroupName=log_group_name,
+ filterName=filter_name
+ )
+ change = True
+ metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]]
+
+ elif state == "present":
+ metricTransformation, change = metricTransformationHandler(
+ metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations)
+
+ change = change or filter_pattern != originFilterPattern
+
+ if change:
+ if not module.check_mode:
+ response = cwl.put_metric_filter(
+ logGroupName=log_group_name,
+ filterName=filter_name,
+ filterPattern=filter_pattern,
+ metricTransformations=metricTransformation
+ )
+
+ metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation]
+
+ module.exit_json(changed=change, metric_filters=metricTransformation)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
new file mode 100644
index 00000000..1e80ddc9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
@@ -0,0 +1,761 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami
+version_added: 1.0.0
+short_description: Create or destroy an image (AMI) in EC2
+description:
+ - Registers or deregisters EC2 images.
+options:
+ instance_id:
+ description:
+ - Instance ID to create the AMI from.
+ type: str
+ name:
+ description:
+ - The name of the new AMI.
+ type: str
+ architecture:
+ description:
+ - The target architecture of the image to register.
+ default: "x86_64"
+ type: str
+ kernel_id:
+ description:
+ - The target kernel id of the image to register.
+ type: str
+ virtualization_type:
+ description:
+ - The virtualization type of the image to register.
+ default: "hvm"
+ type: str
+ root_device_name:
+ description:
+ - The root device name of the image to register.
+ type: str
+ wait:
+ description:
+ - Wait for the AMI to be in state 'available' before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 1200
+ type: int
+ state:
+ description:
+ - Register or deregister an AMI.
+ default: 'present'
+ choices: [ "absent", "present" ]
+ type: str
+ description:
+ description:
+ - Human-readable string describing the contents and purpose of the AMI.
+ type: str
+ no_reboot:
+ description:
+      - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is true, the
+        responsibility of maintaining file system integrity is left to the owner of the instance.
+ default: false
+ type: bool
+ image_id:
+ description:
+ - Image ID to be deregistered.
+ type: str
+ device_mapping:
+ description:
+ - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters).
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ description:
+ - The device name. For example C(/dev/sda).
+ required: true
+ virtual_name:
+ type: str
+ description:
+ - The virtual name for the device.
+ - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ no_device:
+ type: bool
+ description:
+ - Suppresses the specified device included in the block device mapping of the AMI.
+ volume_type:
+ type: str
+ description: The volume type. Defaults to C(gp2) when not set.
+ delete_on_termination:
+ type: bool
+ description: Whether the device should be automatically deleted when the Instance is terminated.
+ snapshot_id:
+ type: str
+ description: The ID of the Snapshot.
+ iops:
+ type: int
+ description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume.
+ encrypted:
+ type: bool
+ description: Whether the volume should be encrypted.
+ volume_size:
+ aliases: ['size']
+ type: int
+ description: The size of the volume (in GiB).
+ delete_snapshot:
+ description:
+ - Delete snapshots when deregistering the AMI.
+ default: false
+ type: bool
+ launch_permissions:
+ description:
+ - Users and groups that should be able to launch the AMI.
+ - Expects dictionary with a key of C(user_ids) and/or C(group_names).
+ - C(user_ids) should be a list of account IDs.
+      - C(group_names) should be a list of groups, C(all) is the only acceptable value currently.
+ - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users).
+ type: dict
+ image_location:
+ description:
+ - The S3 location of an image to use for the AMI.
+ type: str
+ enhanced_networking:
+ description:
+ - A boolean representing whether enhanced networking with ENA is enabled or not.
+ type: bool
+ billing_products:
+ description:
+ - A list of valid billing codes. To be used with valid accounts by AWS Marketplace vendors.
+ type: list
+ elements: str
+ ramdisk_id:
+ description:
+ - The ID of the RAM disk.
+ type: str
+ sriov_net_support:
+ description:
+      - Set to C(simple) to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.
+ type: str
+author:
+ - "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
+ - "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
+ - "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
+ - "Willem van Ketwich (@wilvk) <willvk@gmail.com>"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+# Thank you to iAcquire for sponsoring development of this module.
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Basic AMI Creation
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ wait: true
+ name: newtest
+ tags:
+ Name: newtest
+ Service: TestService
+
+- name: Basic AMI Creation, without waiting
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+    wait: false
+ name: newtest
+
+- name: AMI Registration from EBS Snapshot
+ amazon.aws.ec2_ami:
+ name: newtest
+ state: present
+ architecture: x86_64
+ virtualization_type: hvm
+ root_device_name: /dev/xvda
+ device_mapping:
+ - device_name: /dev/xvda
+ volume_size: 8
+ snapshot_id: snap-xxxxxxxx
+ delete_on_termination: true
+ volume_type: gp2
+
+- name: AMI Creation, with a custom root-device size and another EBS attached
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ name: newtest
+ device_mapping:
+ - device_name: /dev/sda1
+ size: XXX
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdb
+ size: YYY
+ delete_on_termination: false
+ volume_type: gp2
+
+- name: AMI Creation, excluding a volume attached at /dev/sdb
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ name: newtest
+ device_mapping:
+ - device_name: /dev/sda1
+ size: XXX
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdb
+ no_device: true
+
+- name: Deregister/Delete AMI (keep associated snapshots)
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+    delete_snapshot: false
+ state: absent
+
+- name: Deregister AMI (delete associated snapshots too)
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+    delete_snapshot: true
+ state: absent
+
+- name: Update AMI Launch Permissions, making it public
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ state: present
+ launch_permissions:
+ group_names: ['all']
+
+- name: Allow AMI to be launched by another account
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ state: present
+ launch_permissions:
+ user_ids: ['123456789012']
+'''
+
+RETURN = '''
+architecture:
+ description: Architecture of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "x86_64"
+block_device_mapping:
+ description: Block device mapping associated with image.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ "/dev/sda1": {
+ "delete_on_termination": true,
+ "encrypted": false,
+ "size": 10,
+ "snapshot_id": "snap-1a03b80e7",
+ "volume_type": "standard"
+ }
+ }
+creationDate:
+ description: Creation date of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "2015-10-15T22:43:44.000Z"
+description:
+ description: Description of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "nat-server"
+hypervisor:
+ description: Type of hypervisor.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "xen"
+image_id:
+ description: ID of the image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "ami-1234abcd"
+is_public:
+ description: Whether image is public.
+ returned: when AMI is created or already exists
+ type: bool
+ sample: false
+launch_permission:
+ description: Permissions allowing other accounts to access the AMI.
+ returned: when AMI is created or already exists
+ type: list
+ sample:
+ - group: "all"
+location:
+ description: Location of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "123456789012/nat-server"
+name:
+ description: AMI name of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "nat-server"
+ownerId:
+ description: Owner of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "123456789012"
+platform:
+ description: Platform of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: null
+root_device_name:
+ description: Root device name of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "/dev/sda1"
+root_device_type:
+ description: Root device type of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "ebs"
+state:
+ description: State of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "available"
+tags:
+ description: A dictionary of tags assigned to image.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ "Env": "devel",
+ "Name": "nat-server"
+ }
+virtualization_type:
+ description: Image virtualization type.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "hvm"
+snapshots_deleted:
+ description: A list of snapshot ids deleted after deregistering image.
+ returned: after AMI is deregistered, if I(delete_snapshot=true)
+ type: list
+ sample: [
+ "snap-fbcccb8f",
+ "snap-cfe7cdb4"
+ ]
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def get_block_device_mapping(image):
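+    # Flatten the image's block_device_mappings into a dict keyed by device
+    # name, keeping the EBS fields documented in RETURN (or virtual_name for
+    # ephemeral devices).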
+ bdm_dict = dict()
+ if image is not None and image.get('block_device_mappings') is not None:
+ bdm = image.get('block_device_mappings')
+ for device in bdm:
+ device_name = device.get('device_name')
+ if 'ebs' in device:
+ ebs = device.get("ebs")
+ bdm_dict_item = {
+ 'size': ebs.get("volume_size"),
+ 'snapshot_id': ebs.get("snapshot_id"),
+ 'volume_type': ebs.get("volume_type"),
+ 'encrypted': ebs.get("encrypted"),
+ 'delete_on_termination': ebs.get("delete_on_termination")
+ }
+ elif 'virtual_name' in device:
+ bdm_dict_item = dict(virtual_name=device['virtual_name'])
+ bdm_dict[device_name] = bdm_dict_item
+ return bdm_dict
+
+
+def get_ami_info(camel_image):
+ image = camel_dict_to_snake_dict(camel_image)
+ return dict(
+ image_id=image.get("image_id"),
+ state=image.get("state"),
+ architecture=image.get("architecture"),
+ block_device_mapping=get_block_device_mapping(image),
+ creationDate=image.get("creation_date"),
+ description=image.get("description"),
+ hypervisor=image.get("hypervisor"),
+ is_public=image.get("public"),
+ location=image.get("image_location"),
+ ownerId=image.get("owner_id"),
+ root_device_name=image.get("root_device_name"),
+ root_device_type=image.get("root_device_type"),
+ virtualization_type=image.get("virtualization_type"),
+ name=image.get("name"),
+ tags=boto3_tag_list_to_ansible_dict(image.get('tags')),
+ platform=image.get("platform"),
+ enhanced_networking=image.get("ena_support"),
+ image_owner_alias=image.get("image_owner_alias"),
+ image_type=image.get("image_type"),
+ kernel_id=image.get("kernel_id"),
+ product_codes=image.get("product_codes"),
+ ramdisk_id=image.get("ramdisk_id"),
+ sriov_net_support=image.get("sriov_net_support"),
+ state_reason=image.get("state_reason"),
+ launch_permissions=image.get('launch_permissions')
+ )
+
+
+def create_image(module, connection):
+ instance_id = module.params.get('instance_id')
+ name = module.params.get('name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ description = module.params.get('description')
+ architecture = module.params.get('architecture')
+ kernel_id = module.params.get('kernel_id')
+ root_device_name = module.params.get('root_device_name')
+ virtualization_type = module.params.get('virtualization_type')
+ no_reboot = module.params.get('no_reboot')
+ device_mapping = module.params.get('device_mapping')
+ tags = module.params.get('tags')
+ launch_permissions = module.params.get('launch_permissions')
+ image_location = module.params.get('image_location')
+ enhanced_networking = module.params.get('enhanced_networking')
+ billing_products = module.params.get('billing_products')
+ ramdisk_id = module.params.get('ramdisk_id')
+ sriov_net_support = module.params.get('sriov_net_support')
+
+ if module.check_mode:
+ image = connection.describe_images(Filters=[{'Name': 'name', 'Values': [str(name)]}])
+ if not image['Images']:
+            module.exit_json(changed=True, msg='Would have created an AMI if not in check mode.')
+ else:
+ module.exit_json(changed=False, msg='Error registering image: AMI name is already in use by another AMI')
+
+ try:
+ params = {
+ 'Name': name,
+ 'Description': description
+ }
+
+ block_device_mapping = None
+ # Remove empty values injected by using options
+ if device_mapping:
+ block_device_mapping = []
+ for device in device_mapping:
+ device = dict((k, v) for k, v in device.items() if v is not None)
+ device['Ebs'] = {}
+ device = rename_item_if_exists(device, 'device_name', 'DeviceName')
+ device = rename_item_if_exists(device, 'virtual_name', 'VirtualName')
+ device = rename_item_if_exists(device, 'no_device', 'NoDevice')
+ device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs')
+ device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs')
+ device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs')
+ device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int)
+ device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int)
+ device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs')
+ device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs')
+
+ # The NoDevice parameter in Boto3 is a string. Empty string omits the device from block device mapping
+ # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image
+ if 'NoDevice' in device:
+ if device['NoDevice'] is True:
+ device['NoDevice'] = ""
+ else:
+ del device['NoDevice']
+ block_device_mapping.append(device)
+ if block_device_mapping:
+ params['BlockDeviceMappings'] = block_device_mapping
+ if instance_id:
+ params['InstanceId'] = instance_id
+ params['NoReboot'] = no_reboot
+ tag_spec = boto3_tag_specifications(tags, types=['image', 'snapshot'])
+ if tag_spec:
+ params['TagSpecifications'] = tag_spec
+ image_id = connection.create_image(aws_retry=True, **params).get('ImageId')
+ else:
+ if architecture:
+ params['Architecture'] = architecture
+ if virtualization_type:
+ params['VirtualizationType'] = virtualization_type
+ if image_location:
+ params['ImageLocation'] = image_location
+ if enhanced_networking:
+ params['EnaSupport'] = enhanced_networking
+ if billing_products:
+ params['BillingProducts'] = billing_products
+ if ramdisk_id:
+ params['RamdiskId'] = ramdisk_id
+ if sriov_net_support:
+ params['SriovNetSupport'] = sriov_net_support
+ if kernel_id:
+ params['KernelId'] = kernel_id
+ if root_device_name:
+ params['RootDeviceName'] = root_device_name
+ image_id = connection.register_image(aws_retry=True, **params).get('ImageId')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error registering image")
+
+ if wait:
+ delay = 15
+ max_attempts = wait_timeout // delay
+ waiter = get_waiter(connection, 'image_available')
+ waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts))
+
+ if tags and 'TagSpecifications' not in params:
+ image_info = get_image_by_id(module, connection, image_id)
+ add_ec2_tags(connection, module, image_id, tags)
+ if image_info and image_info.get('BlockDeviceMappings'):
+ for mapping in image_info.get('BlockDeviceMappings'):
+ # We can only tag Ebs volumes
+ if 'Ebs' not in mapping:
+ continue
+ add_ec2_tags(connection, module, mapping.get('Ebs').get('SnapshotId'), tags)
+
+ if launch_permissions:
+ try:
+ params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list()))
+ for group_name in launch_permissions.get('group_names', []):
+ params['LaunchPermission']['Add'].append(dict(Group=group_name))
+ for user_id in launch_permissions.get('user_ids', []):
+ params['LaunchPermission']['Add'].append(dict(UserId=str(user_id)))
+ if params['LaunchPermission']['Add']:
+ connection.modify_image_attribute(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id)
+
+ module.exit_json(msg="AMI creation operation complete.", changed=True,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+
+
+def deregister_image(module, connection):
+ image_id = module.params.get('image_id')
+ delete_snapshot = module.params.get('delete_snapshot')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ image = get_image_by_id(module, connection, image_id)
+
+ if image is None:
+ module.exit_json(changed=False)
+
+ # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable.
+ snapshots = []
+ if 'BlockDeviceMappings' in image:
+ for mapping in image.get('BlockDeviceMappings'):
+ snapshot_id = mapping.get('Ebs', {}).get('SnapshotId')
+ if snapshot_id is not None:
+ snapshots.append(snapshot_id)
+
+ # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes.
+ if 'ImageId' in image:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have deregistered AMI if not in check mode.')
+ try:
+ connection.deregister_image(aws_retry=True, ImageId=image_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error deregistering image")
+ else:
+ module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False)
+
+ image = get_image_by_id(module, connection, image_id)
+ wait_timeout = time.time() + wait_timeout
+
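+    # Poll describe_images every 3 seconds until the image disappears or the
+    # timeout expires.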
+ while wait and wait_timeout > time.time() and image is not None:
+ image = get_image_by_id(module, connection, image_id)
+ time.sleep(3)
+
+ if wait and wait_timeout <= time.time():
+ module.fail_json(msg="Timed out waiting for image to be deregistered.")
+
+ exit_params = {'msg': "AMI deregister operation complete.", 'changed': True}
+
+ if delete_snapshot:
+ for snapshot_id in snapshots:
+ try:
+ connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id)
+ # Don't error out if root volume snapshot was already deregistered as part of deregister_image
+ except is_boto3_error_code('InvalidSnapshot.NotFound'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to delete snapshot.')
+ exit_params['snapshots_deleted'] = snapshots
+
+ module.exit_json(**exit_params)
+
+
+def update_image(module, connection, image_id):
+ launch_permissions = module.params.get('launch_permissions')
+ image = get_image_by_id(module, connection, image_id)
+ if image is None:
+ module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
+ changed = False
+
+ if launch_permissions is not None:
+ current_permissions = image['LaunchPermissions']
+
+ current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission)
+ desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', []))
+ current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission)
+ desired_groups = set(launch_permissions.get('group_names', []))
+
+ to_add_users = desired_users - current_users
+ to_remove_users = current_users - desired_users
+ to_add_groups = desired_groups - current_groups
+ to_remove_groups = current_groups - desired_groups
+
+ to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users]
+ to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users]
+
+ if to_add or to_remove:
+ try:
+ if not module.check_mode:
+ connection.modify_image_attribute(aws_retry=True,
+ ImageId=image_id, Attribute='launchPermission',
+ LaunchPermission=dict(Add=to_add, Remove=to_remove))
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id)
+
+ desired_tags = module.params.get('tags')
+ if desired_tags is not None:
+ changed |= ensure_ec2_tags(connection, module, image_id, tags=desired_tags, purge_tags=module.params.get('purge_tags'))
+
+ description = module.params.get('description')
+ if description and description != image['Description']:
+ try:
+ if not module.check_mode:
+                connection.modify_image_attribute(aws_retry=True, Attribute='Description', ImageId=image_id, Description=dict(Value=description))
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error setting description for image %s" % image_id)
+
+ if changed:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have updated AMI if not in check mode.')
+ module.exit_json(msg="AMI updated.", changed=True,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+ else:
+ module.exit_json(msg="AMI not updated.", changed=False,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+
+
+def get_image_by_id(module, connection, image_id):
+ try:
+ try:
+ images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error retrieving image %s" % image_id)
+ images = images_response.get('Images')
+ no_images = len(images)
+ if no_images == 0:
+ return None
+ if no_images == 1:
+ result = images[0]
+ try:
+ result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+ ImageId=image_id)['LaunchPermissions']
+ result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes',
+ ImageId=image_id)['ProductCodes']
+ except is_boto3_error_code('InvalidAMIID.Unavailable'):
+ pass
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
+ return result
+ module.fail_json(msg="Invalid number of instances (%s) found for image_id: %s." % (str(len(images)), image_id))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error retrieving image by image_id")
+
+
+def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None):
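+    # Move dict_object[attribute] to dict_object[new_attribute] (optionally
+    # nested under child_node and cast with attribute_type), translating the
+    # module's snake_case parameters into the CamelCase keys the EC2 API expects.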
+ new_item = dict_object.get(attribute)
+ if new_item is not None:
+ if attribute_type is not None:
+ new_item = attribute_type(new_item)
+ if child_node is None:
+ dict_object[new_attribute] = new_item
+ else:
+ dict_object[child_node][new_attribute] = new_item
+ dict_object.pop(attribute)
+ return dict_object
+
+
+def main():
+ mapping_options = dict(
+ device_name=dict(type='str', required=True),
+ virtual_name=dict(type='str'),
+ no_device=dict(type='bool'),
+ volume_type=dict(type='str'),
+ delete_on_termination=dict(type='bool'),
+ snapshot_id=dict(type='str'),
+ iops=dict(type='int'),
+ encrypted=dict(type='bool'),
+ volume_size=dict(type='int', aliases=['size']),
+ )
+ argument_spec = dict(
+ instance_id=dict(),
+ image_id=dict(),
+ architecture=dict(default='x86_64'),
+ kernel_id=dict(),
+ virtualization_type=dict(default='hvm'),
+ root_device_name=dict(),
+ delete_snapshot=dict(default=False, type='bool'),
+ name=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(default=1200, type='int'),
+ description=dict(default=''),
+ no_reboot=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent']),
+ device_mapping=dict(type='list', elements='dict', options=mapping_options),
+ launch_permissions=dict(type='dict'),
+ image_location=dict(),
+ enhanced_networking=dict(type='bool'),
+ billing_products=dict(type='list', elements='str',),
+ ramdisk_id=dict(),
+ sriov_net_support=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'absent', ['image_id']],
+ ],
+ supports_check_mode=True,
+ )
+
+ # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
+ # the required_if for state=absent, so check manually instead
+ if not any([module.params['image_id'], module.params['name']]):
+ module.fail_json(msg="one of the following is required: name, image_id")
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ if module.params.get('state') == 'absent':
+ deregister_image(module, connection)
+ elif module.params.get('state') == 'present':
+ if module.params.get('image_id'):
+ update_image(module, connection, module.params.get('image_id'))
+ if not module.params.get('instance_id') and not module.params.get('device_mapping'):
+ module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
+ create_image(module, connection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
new file mode 100644
index 00000000..81b1c94e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: 1.0.0
+short_description: Gather information about ec2 AMIs
+description:
+ - Gather information about ec2 AMIs
+author:
+ - Prasad Katti (@prasadkatti)
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+ - Filter the images by the owner. Valid options are an AWS account ID, self,
+ or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ amazon.aws.ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ amazon.aws.ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ amazon.aws.ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ amazon.aws.ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+          description: EBS volume information.
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 123456789012/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+      description: A list of AWS accounts that may launch the AMI.
+ returned: When image is owned by calling account and I(describe_image_attributes=true).
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "123456789012"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '123456789012'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+ # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param,
+ ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+ ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except is_boto3_error_code('AuthFailure'):
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+ except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except
+            module.fail_json_aws(err, msg='Failed to describe AMI')
+
+ images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', elements='str', aliases=['owner']),
+ executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py
new file mode 100644
index 00000000..4c3094b9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py
@@ -0,0 +1,666 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip
+version_added: 5.0.0
+short_description: Manages EC2 elastic IP (EIP) addresses
+description:
+ - This module can allocate or release an EIP.
+ - This module can associate/disassociate an EIP with instances or network interfaces.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ device_id:
+ description:
+ - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
+ - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01.
+ required: false
+ aliases: [ instance_id ]
+ type: str
+ public_ip:
+ description:
+ - The IP address of a previously allocated EIP.
+ - When I(state=present) and device is specified, the EIP is associated with the device.
+ - When I(state=absent) and device is specified, the EIP is disassociated from the device.
+ aliases: [ ip ]
+ type: str
+ state:
+ description:
+ - When C(state=present), allocate an EIP or associate an existing EIP with a device.
+ - When C(state=absent), disassociate the EIP from the device and optionally release it.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ in_vpc:
+ description:
+ - Allocate an EIP inside a VPC or not.
+ - Required if specifying an ENI with I(device_id).
+ default: false
+ type: bool
+ reuse_existing_ip_allowed:
+ description:
+ - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
+ default: false
+ type: bool
+ release_on_disassociation:
+ description:
+ - Whether or not to automatically release the EIP when it is disassociated.
+ default: false
+ type: bool
+ private_ip_address:
+ description:
+ - The primary or secondary private IP address to associate with the Elastic IP address.
+ type: str
+ allow_reassociation:
+ description:
+ - Specify this option to allow an Elastic IP address that is already associated with another
+ network interface or instance to be re-associated with the specified instance or interface.
+ default: false
+ type: bool
+ tag_name:
+ description:
+ - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse
+ an Elastic IP if it is tagged with I(tag_name).
+ type: str
+ tag_value:
+ description:
+ - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value).
+ type: str
+ public_ipv4_pool:
+ description:
+      - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP).
+      - Only applies to newly allocated Elastic IPs; the pool is not validated when I(reuse_existing_ip_allowed=true).
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+author:
+ - "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
+notes:
+ - There may be a delay between the time the EIP is assigned and when
+ the cloud instance is reachable via the new address. Use wait_for and
+ pause to delay further playbook execution until the instance is reachable,
+ if necessary.
+ - This module returns multiple changed statuses on disassociation or release.
+ It returns an overall status based on any changes occurring. It also returns
+ individual changed statuses for disassociation and release.
+ - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: associate an elastic IP with an instance
+ amazon.aws.ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+
+- name: associate an elastic IP with a device
+ amazon.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+
+- name: associate an elastic IP with a device and allow reassociation
+ amazon.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ public_ip: 93.184.216.119
+ allow_reassociation: true
+
+- name: disassociate an elastic IP from an instance
+ amazon.aws.ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+ state: absent
+
+- name: disassociate an elastic IP from a device
+ amazon.aws.ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+ state: absent
+
+- name: allocate a new elastic IP and associate it with an instance
+ amazon.aws.ec2_eip:
+ device_id: i-1212f003
+
+- name: allocate a new elastic IP without associating it to anything
+ amazon.aws.ec2_eip:
+ state: present
+ register: eip
+
+- name: output the IP
+ ansible.builtin.debug:
+ msg: "Allocated IP is {{ eip.public_ip }}"
+
+- name: provision new instances with ec2
+ amazon.aws.ec2:
+ keypair: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: true
+ group: webserver
+ count: 3
+ register: ec2
+
+- name: associate new elastic IPs with each of the instances
+ amazon.aws.ec2_eip:
+ device_id: "{{ item }}"
+ loop: "{{ ec2.instance_ids }}"
+
+- name: allocate a new elastic IP inside a VPC in us-west-2
+ amazon.aws.ec2_eip:
+ region: us-west-2
+ in_vpc: true
+ register: eip
+
+- name: output the IP
+ ansible.builtin.debug:
+ msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
+
+- name: allocate eip - reuse unallocated ips (if found) with FREE tag
+ amazon.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: FREE
+
+- name: allocate eip - reuse unallocated ips if tag reserved is nope
+ amazon.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: reserved
+ tag_value: nope
+
+- name: allocate new eip - from servers given ipv4 pool
+ amazon.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
+
+- name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic)
+ amazon.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: dev-servers
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
+
+- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname
+ amazon.aws.ec2_eip:
+ region: us-east-1
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: reserved_for
+ tag_value: "{{ inventory_hostname }}"
+ public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
+'''
+
+RETURN = '''
+allocation_id:
+ description: allocation_id of the elastic ip
+ returned: on success
+ type: str
+ sample: eipalloc-51aa3a6c
+public_ip:
+ description: an elastic ip address
+ returned: on success
+ type: str
+ sample: 52.88.159.209
+'''
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+
+
+def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True):
+ if address_is_associated_with_device(ec2, module, address, device_id, is_instance):
+ return {'changed': False}
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ if is_instance:
+ try:
+ params = dict(
+ InstanceId=device_id,
+ AllowReassociation=allow_reassociation,
+ )
+ if private_ip_address:
+ params['PrivateIpAddress'] = private_ip_address
+ if address['Domain'] == 'vpc':
+ params['AllocationId'] = address['AllocationId']
+ else:
+ params['PublicIp'] = address['PublicIp']
+ res = ec2.associate_address(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id)
+ module.fail_json_aws(e, msg=msg)
+ else:
+ params = dict(
+ NetworkInterfaceId=device_id,
+ AllocationId=address['AllocationId'],
+ AllowReassociation=allow_reassociation,
+ )
+
+ if private_ip_address:
+ params['PrivateIpAddress'] = private_ip_address
+
+ try:
+ res = ec2.associate_address(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id)
+ module.fail_json_aws(e, msg=msg)
+        if not res:
+            # no exception object is in scope here, so use fail_json rather than fail_json_aws
+            module.fail_json(msg='Association failed.')
+
+ return {'changed': True}
+
+
+def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
+ if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
+ return {'changed': False}
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ try:
+ if address['Domain'] == 'vpc':
+ res = ec2.disassociate_address(
+ AssociationId=address['AssociationId'], aws_retry=True
+ )
+ else:
+ res = ec2.disassociate_address(
+ PublicIp=address['PublicIp'], aws_retry=True
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Dissassociation of Elastic IP failed")
+
+ return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def find_address(ec2, module, public_ip, device_id, is_instance=True):
+ """ Find an existing Elastic IP address """
+ filters = []
+ kwargs = {}
+
+ if public_ip:
+ kwargs["PublicIps"] = [public_ip]
+ elif device_id:
+ if is_instance:
+ filters.append({"Name": 'instance-id', "Values": [device_id]})
+ else:
+ filters.append({'Name': 'network-interface-id', "Values": [device_id]})
+
+ if len(filters) > 0:
+ kwargs["Filters"] = filters
+ elif len(filters) == 0 and public_ip is None:
+ return None
+
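+    # For illustration: public_ip='52.88.159.209' becomes
+    # kwargs={'PublicIps': ['52.88.159.209']}, while a device_id is looked up via
+    # an 'instance-id' or 'network-interface-id' filter instead.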
+ try:
+ addresses = ec2.describe_addresses(**kwargs)
+ except is_boto3_error_code('InvalidAddress.NotFound') as e:
+ # If we're releasing and we can't find it, it's already gone...
+ if module.params.get('state') == 'absent':
+ module.exit_json(changed=False, disassociated=False, released=False)
+ module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+ addresses = addresses["Addresses"]
+ if len(addresses) == 1:
+ return addresses[0]
+ elif len(addresses) > 1:
+ msg = "Found more than one address using args {0}".format(kwargs)
+ msg += "Addresses found: {0}".format(addresses)
+ module.fail_json_aws(botocore.exceptions.ClientError, msg=msg)
+
+
+def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True):
+ """ Check if the elastic IP is currently associated with the device """
+ address = find_address(ec2, module, address["PublicIp"], device_id, is_instance)
+ if address:
+ if is_instance:
+ if "InstanceId" in address and address["InstanceId"] == device_id:
+ return address
+ else:
+ if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id:
+ return address
+ return False
+
+
+def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None):
+ """ Allocate a new elastic IP address (when needed) and return it """
+ if not domain:
+ domain = 'standard'
+
+ if reuse_existing_ip_allowed:
+ filters = []
+ filters.append({'Name': 'domain', "Values": [domain]})
+
+ if tag_dict is not None:
+ filters += ansible_dict_to_boto3_filter_list(tag_dict)
+
+ try:
+ all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
+
+ all_addresses = all_addresses["Addresses"]
+
+ if domain == 'vpc':
+ unassociated_addresses = [a for a in all_addresses
+ if not a.get('AssociationId', None)]
+ else:
+            unassociated_addresses = [a for a in all_addresses
+                                      if not a.get('InstanceId')]
+ if unassociated_addresses:
+ return unassociated_addresses[0], False
+
+ if public_ipv4_pool:
+ return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True
+
+ try:
+ if check_mode:
+ return None, True
+ result = ec2.allocate_address(Domain=domain, aws_retry=True), True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+ return result
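+# For illustration: allocate_address() returns an (address, changed) tuple -
+# (matched_address, False) when an unassociated EIP was reused,
+# (new_address, True) after a fresh allocation, and (None, True) in check mode.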
+
+
+def release_address(ec2, module, address, check_mode):
+ """ Release a previously allocated elastic IP address """
+
+ # If we're in check mode, nothing else to do
+ if not check_mode:
+ try:
+ result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't release Elastic IP address")
+
+ return {'changed': True}
+
+
+@AWSRetry.jittered_backoff()
+def describe_eni_with_backoff(ec2, module, device_id):
+ try:
+ return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id])
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e:
+ module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+
+
+def find_device(ec2, module, device_id, is_instance=True):
+ """ Attempt to find the EC2 instance and return it """
+
+ if is_instance:
+ try:
+ paginator = ec2.get_paginator('describe_instances')
+ reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]'))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get list of instances")
+
+ if len(reservations) == 1:
+ instances = reservations[0]['Instances']
+ if len(instances) == 1:
+ return instances[0]
+ else:
+ try:
+ interfaces = describe_eni_with_backoff(ec2, module, device_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
+ if len(interfaces) == 1:
+ return interfaces[0]
+
+
+def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
+ reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True):
+ changed = False
+
+ # Return the EIP object since we've been given a public IP
+ if not address:
+ if check_mode:
+ return {'changed': True}
+
+ address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode)
+
+ if device_id:
+ # Allocate an IP for instance since no public_ip was provided
+ if is_instance:
+ instance = find_device(ec2, module, device_id)
+ if reuse_existing_ip_allowed:
+                if instance.get('VpcId') and domain is None:
+                    msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc"
+                    module.fail_json(msg=msg)
+
+ # Associate address object (provided or allocated) with instance
+ assoc_result = associate_ip_and_device(
+ ec2, module, address, private_ip_address, device_id, allow_reassociation,
+ check_mode
+ )
+ else:
+ instance = find_device(ec2, module, device_id, is_instance=False)
+ # Associate address object (provided or allocated) with instance
+ assoc_result = associate_ip_and_device(
+ ec2, module, address, private_ip_address, device_id, allow_reassociation,
+ check_mode, is_instance=False
+ )
+
+ changed = changed or assoc_result['changed']
+
+ return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']}
+
+
+def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True):
+ if not address:
+ return {'changed': False}
+
+ # disassociating address from instance
+ if device_id:
+ if is_instance:
+ return disassociate_ip_and_device(
+ ec2, module, address, device_id, check_mode
+ )
+ else:
+ return disassociate_ip_and_device(
+ ec2, module, address, device_id, check_mode, is_instance=False
+ )
+ # releasing address
+ else:
+ return release_address(ec2, module, address, check_mode)
+
+
+def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool):
+ # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address
+ """ Overrides botocore's allocate_address function to support BYOIP """
+ if check_mode:
+ return None
+
+ params = {}
+
+ if domain is not None:
+ params['Domain'] = domain
+
+ if public_ipv4_pool is not None:
+ params['PublicIpv4Pool'] = public_ipv4_pool
+
+ try:
+ result = ec2.allocate_address(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
+ return result
+
+
+def generate_tag_dict(module, tag_name, tag_value):
+ # type: (AnsibleAWSModule, str, str) -> Optional[Dict]
+ """ Generates a dictionary to be passed as a filter to Amazon """
+ if tag_name and not tag_value:
+        if tag_name.startswith('tag:'):
+            # str.strip() removes a character set, not a prefix; slice it off instead
+            tag_name = tag_name[len('tag:'):]
+ return {'tag-key': tag_name}
+
+ elif tag_name and tag_value:
+ if not tag_name.startswith('tag:'):
+ tag_name = 'tag:' + tag_name
+ return {tag_name: tag_value}
+
+ elif tag_value and not tag_name:
+ module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')")
+
+
+def main():
+ argument_spec = dict(
+ device_id=dict(required=False, aliases=['instance_id'],
+ deprecated_aliases=[dict(name='instance_id',
+ date='2022-12-01',
+ collection_name='amazon.aws')]),
+ public_ip=dict(required=False, aliases=['ip']),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ in_vpc=dict(required=False, type='bool', default=False),
+ reuse_existing_ip_allowed=dict(required=False, type='bool',
+ default=False),
+ release_on_disassociation=dict(required=False, type='bool', default=False),
+ allow_reassociation=dict(type='bool', default=False),
+ private_ip_address=dict(),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(required=False, type='bool', default=True),
+ tag_name=dict(),
+ tag_value=dict(),
+ public_ipv4_pool=dict()
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_by={
+ 'private_ip_address': ['device_id'],
+ },
+ )
+
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ device_id = module.params.get('device_id')
+ instance_id = module.params.get('instance_id')
+ public_ip = module.params.get('public_ip')
+ private_ip_address = module.params.get('private_ip_address')
+ state = module.params.get('state')
+ in_vpc = module.params.get('in_vpc')
+ domain = 'vpc' if in_vpc else None
+ reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
+ release_on_disassociation = module.params.get('release_on_disassociation')
+ allow_reassociation = module.params.get('allow_reassociation')
+ tag_name = module.params.get('tag_name')
+ tag_value = module.params.get('tag_value')
+ public_ipv4_pool = module.params.get('public_ipv4_pool')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
+ if instance_id:
+ is_instance = True
+ device_id = instance_id
+ else:
+ if device_id and device_id.startswith('i-'):
+ is_instance = True
+ elif device_id:
+ if device_id.startswith('eni-') and not in_vpc:
+ module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
+ is_instance = False
+
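+    # For illustration: device_id='i-0abc123' implies is_instance=True, while
+    # device_id='eni-0abc123' requires in_vpc=true and implies is_instance=False.
+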
+ # Tags for *searching* for an EIP.
+ tag_dict = generate_tag_dict(module, tag_name, tag_value)
+
+ try:
+ if device_id:
+ address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
+ else:
+ address = find_address(ec2, module, public_ip, None)
+
+ if state == 'present':
+ if device_id:
+ result = ensure_present(
+ ec2, module, domain, address, private_ip_address, device_id,
+ reuse_existing_ip_allowed, allow_reassociation,
+ module.check_mode, is_instance=is_instance
+ )
+ if 'allocation_id' not in result:
+ # Don't check tags on check_mode here - no EIP to pass through
+ module.exit_json(**result)
+ else:
+ if address:
+ result = {
+ 'changed': False,
+ 'public_ip': address['PublicIp'],
+ 'allocation_id': address['AllocationId']
+ }
+ else:
+ address, changed = allocate_address(
+ ec2, module, domain, reuse_existing_ip_allowed,
+ module.check_mode, tag_dict, public_ipv4_pool
+ )
+ if address:
+ result = {
+ 'changed': changed,
+ 'public_ip': address['PublicIp'],
+ 'allocation_id': address['AllocationId']
+ }
+ else:
+ # Don't check tags on check_mode here - no EIP to pass through
+ result = {
+ 'changed': changed
+ }
+ module.exit_json(**result)
+
+ result['changed'] |= ensure_ec2_tags(
+ ec2, module, result['allocation_id'],
+ resource_type='elastic-ip', tags=tags, purge_tags=purge_tags)
+ else:
+ if device_id:
+ disassociated = ensure_absent(
+ ec2, module, address, device_id, module.check_mode, is_instance=is_instance
+ )
+
+ if release_on_disassociation and disassociated['changed']:
+ released = release_address(ec2, module, address, module.check_mode)
+ result = {
+ 'changed': True,
+ 'disassociated': disassociated['changed'],
+ 'released': released['changed']
+ }
+ else:
+ result = {
+ 'changed': disassociated['changed'],
+ 'disassociated': disassociated['changed'],
+ 'released': False
+ }
+ else:
+ released = release_address(ec2, module, address, module.check_mode)
+ result = {
+ 'changed': released['changed'],
+ 'disassociated': False,
+ 'released': released['changed']
+ }
+
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
new file mode 100644
index 00000000..c94f164f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eip_info
+version_added: 5.0.0
+short_description: List EC2 EIP details
+description:
+ - List details of EC2 Elastic IP addresses.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Brad Macpherson (@iiibrad)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and filter
+ value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
+ for possible filters. Filter names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details or the AWS region,
+# see the AWS Guide for details.
+
+- name: List all EIP addresses in the current region.
+ amazon.aws.ec2_eip_info:
+ register: regional_eip_addresses
+
+- name: List all EIP addresses for a VM.
+ amazon.aws.ec2_eip_info:
+ filters:
+ instance-id: i-123456789
+ register: my_vm_eips
+
+- ansible.builtin.debug:
+ msg: "{{ my_vm_eips.addresses | selectattr('private_ip_address', 'equalto', '10.0.0.5') }}"
+
+- name: List all EIP addresses for several VMs.
+ amazon.aws.ec2_eip_info:
+ filters:
+ instance-id:
+ - i-123456789
+ - i-987654321
+ register: my_vms_eips
+
+- name: List all EIP addresses using the 'Name' tag as a filter.
+ amazon.aws.ec2_eip_info:
+ filters:
+ tag:Name: www.example.com
+ register: my_vms_eips
+
+- name: List all EIP addresses using the Allocation-id as a filter
+ amazon.aws.ec2_eip_info:
+ filters:
+ allocation-id: eipalloc-64de1b01
+ register: my_vms_eips
+
+# Set the variable eip_alloc to the value of the first allocation_id
+# and set the variable my_pub_ip to the value of the first public_ip
+- ansible.builtin.set_fact:
+    eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
+    my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
+
+'''
+
+
+RETURN = '''
+addresses:
+ description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
+ returned: on success
+ type: list
+ sample: [{
+ "allocation_id": "eipalloc-64de1b01",
+ "association_id": "eipassoc-0fe9ce90d6e983e97",
+ "domain": "vpc",
+ "instance_id": "i-01020cfeb25b0c84f",
+ "network_interface_id": "eni-02fdeadfd4beef9323b",
+ "network_interface_owner_id": "0123456789",
+ "private_ip_address": "10.0.0.1",
+ "public_ip": "54.81.104.1",
+ "tags": {
+ "Name": "test-vm-54.81.104.1"
+ }
+ }]
+
+'''
+
+try:
+ from botocore.exceptions import (BotoCoreError, ClientError)
+except ImportError:
+ pass # caught by imported AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_eips_details(module):
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ filters = module.params.get("filters")
+ try:
+ response = connection.describe_addresses(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(
+ e,
+ msg="Error retrieving EIPs")
+
+ addresses = camel_dict_to_snake_dict(response)['addresses']
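+    # For illustration: camel_dict_to_snake_dict turns {'PublicIp': '1.2.3.4'}
+    # into {'public_ip': '1.2.3.4'}; the boto3 tag list is flattened to a plain dict below.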
+ for address in addresses:
+ if 'tags' in address:
+ address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
+ return addresses
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec=dict(
+ filters=dict(type='dict', default={})
+ ),
+ supports_check_mode=True
+ )
+
+ module.exit_json(changed=False, addresses=get_eips_details(module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
new file mode 100644
index 00000000..4b99f803
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+version_added: 1.0.0
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance.
+ - If I(eni_id) or I(private_ip) is provided, the existing ENI (if any) will be modified.
+ - The I(attached) parameter controls the attachment status of the network interface.
+author:
+ - "Rob White (@wimnat)"
+ - "Mike Healey (@healem)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI (to modify).
+ - If I(eni_id=None) and I(state=present), a new ENI will be created.
+ type: str
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to.
+ type: str
+ private_ip_address:
+ description:
+ - Private IP address.
+ type: str
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI.
+ type: str
+ description:
+ description:
+ - Optional description of the ENI.
+ type: str
+ security_groups:
+ description:
+ - List of security groups associated with the interface.
+ - Ignored when I(state=absent).
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete ENI.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ default: 0
+ type: int
+ attached:
+ description:
+      - Specifies whether the network interface should be attached to or detached from the instance.
+      - If omitted, the attachment status is not changed.
+ type: bool
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
+ or when deleting an interface with I(state=absent).
+ default: false
+ type: bool
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
+ interface is being modified, not on creation.
+ required: false
+ type: bool
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
+ You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ type: bool
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface.
+ - This option is mutually exclusive of I(secondary_private_ip_address_count).
+ required: false
+ type: list
+ elements: str
+ purge_secondary_private_ip_addresses:
+ description:
+ - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
+ - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
+ default: false
+ type: bool
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface.
+ - This option is mutually exclusive of I(secondary_private_ip_addresses).
+ required: false
+ type: int
+ allow_reassignment:
+ description:
+ - Indicates whether to allow an IP address that is already assigned to another network interface or instance
+ to be reassigned to the specified network interface.
+ required: false
+ default: false
+ type: bool
+ name:
+ description:
+ - Name for the ENI. This will create a tag with the key C(Name) and the value assigned here.
+      - This can be used in conjunction with I(subnet_id) as another means of identifying a network interface.
+ - AWS does not enforce unique C(Name) tags, so duplicate names are possible if you configure it that way.
+ If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id).
+ required: false
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+notes:
+  - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
+    or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
+ - Support for I(tags) and I(purge_tags) was added in release 1.3.0.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- amazon.aws.ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- amazon.aws.ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# Without purge_secondary_private_ip_addresses: true, existing secondary IPs are kept
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_address_count: 0
+
+# Destroy an ENI, detaching it from any instance if necessary
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: true
+ state: absent
+
+# Update an ENI
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI using name and subnet_id
+- amazon.aws.ec2_eni:
+ name: eni-20
+ subnet_id: subnet-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI identifying it by private_ip_address and subnet_id
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxx
+ private_ip_address: 172.16.1.1
+ description: "My new description"
+
+# Detach an ENI from an instance
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- amazon.aws.ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- amazon.aws.ec2_eni:
+ eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
+
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ description:
+ description: interface description
+ type: str
+ sample: Firewall network interface
+        groups:
+            description: dict of security groups, mapping group id to group name
+            type: dict
+            sample: { "sg-f8a8a9da": "default" }
+ id:
+ description: network interface id
+ type: str
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: str
+ sample: "00:00:5E:00:53:23"
+ name:
+ description: The name of the ENI
+ type: str
+ sample: "my-eni-20"
+ owner_id:
+ description: aws account id
+ type: str
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: str
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list
+ elements: dict
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: bool
+ sample: True
+ status:
+ description: network interface status
+ type: str
+ sample: "pending"
+ subnet_id:
+            description: which VPC subnet the interface is bound to
+ type: str
+ sample: subnet-b0a0393c
+ tags:
+ description: The dictionary of tags associated with the ENI
+ type: dict
+ sample: { "Name": "my-eni", "group": "Finance" }
+ vpc_id:
+            description: which VPC this network interface is bound to
+ type: str
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+from ipaddress import ip_address
+from ipaddress import ip_network
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ if "PrivateIpAddresses" in interface:
+ for ip in interface["PrivateIpAddresses"]:
+ private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]})
+
+ groups = {}
+ if "Groups" in interface:
+ for group in interface["Groups"]:
+ groups[group["GroupId"]] = group["GroupName"]
+
+ interface_info = {'id': interface.get("NetworkInterfaceId"),
+ 'subnet_id': interface.get("SubnetId"),
+ 'vpc_id': interface.get("VpcId"),
+ 'description': interface.get("Description"),
+ 'owner_id': interface.get("OwnerId"),
+ 'status': interface.get("Status"),
+ 'mac_address': interface.get("MacAddress"),
+ 'private_ip_address': interface.get("PrivateIpAddress"),
+ 'source_dest_check': interface.get("SourceDestCheck"),
+ 'groups': groups,
+ 'private_ip_addresses': private_addresses
+ }
+
+ if "TagSet" in interface:
+ tags = boto3_tag_list_to_ansible_dict(interface["TagSet"])
+ if "Name" in tags:
+ interface_info["name"] = tags["Name"]
+ interface_info["tags"] = tags
+
+ if "Attachment" in interface:
+ interface_info['attachment'] = {
+ 'attachment_id': interface["Attachment"].get("AttachmentId"),
+ 'instance_id': interface["Attachment"].get("InstanceId"),
+ 'device_index': interface["Attachment"].get("DeviceIndex"),
+ 'status': interface["Attachment"].get("Status"),
+ 'attach_time': interface["Attachment"].get("AttachTime"),
+ 'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"),
+ }
+
+ return interface_info
+
+
+def correct_ips(connection, ip_list, module, eni_id):
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ ip_set = set(ip_list)
+
+ return ip_set.issubset(private_addresses)
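+# For illustration: correct_ips(conn, ['10.0.0.5'], module, eni_id) returns True
+# once 10.0.0.5 shows up among the ENI's private addresses (a subset check);
+# wait_for() below polls helpers like this one until they succeed.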
+
+
+def absent_ips(connection, ip_list, module, eni_id):
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ ip_set = set(ip_list)
+
+    # the addresses are absent once none of them remain on the ENI
+    return not ip_set.intersection(private_addresses)
+
+
+def correct_ip_count(connection, ip_count, module, eni_id):
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ if len(private_addresses) == ip_count:
+ return True
+ else:
+ return False
+
+
+def wait_for(function_pointer, *args):
+ max_wait = 30
+ interval_time = 3
+ current_wait = 0
+ while current_wait < max_wait:
+ time.sleep(interval_time)
+ current_wait += interval_time
+ if function_pointer(*args):
+ break
+
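+# For illustration: wait_for(correct_ip_count, connection, 2, module, eni_id)
+# sleeps 3 seconds between checks and gives up after 30 seconds, whether or not
+# the ENI ever reports exactly two private addresses.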
+
+def create_eni(connection, vpc_id, module):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ if instance_id == 'None':
+ instance_id = None
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = get_ec2_security_group_ids_from_names(
+ module.params.get('security_groups'),
+ connection,
+ vpc_id=vpc_id,
+ boto3=True
+ )
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ changed = False
+
+ tags = module.params.get("tags") or dict()
+ name = module.params.get("name")
+ # Make sure that the 'name' parameter sets the Name tag
+ if name:
+ tags['Name'] = name
+
+ try:
+ args = {"SubnetId": subnet_id}
+ if private_ip_address:
+ args["PrivateIpAddress"] = private_ip_address
+ if description:
+ args["Description"] = description
+ if len(security_groups) > 0:
+ args["Groups"] = security_groups
+ if tags:
+ args["TagSpecifications"] = boto3_tag_specifications(tags, types='network-interface')
+
+ # check if provided private_ip_address is within the subnet's address range
+ if private_ip_address:
+ cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])['Subnets'][0]['CidrBlock']
+ valid_private_ip = ip_address(private_ip_address) in ip_network(cidr_block)
+ if not valid_private_ip:
+ module.fail_json(changed=False, msg="Error: cannot create ENI - Address does not fall within the subnet's address range.")
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have created ENI if not in check mode.")
+
+ eni_dict = connection.create_network_interface(aws_retry=True, **args)
+ eni = eni_dict["NetworkInterface"]
+ # Once we have an ID make sure we're always modifying the same object
+ eni_id = eni["NetworkInterfaceId"]
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+
+ if attached and instance_id is not None:
+ try:
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni["NetworkInterfaceId"]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+
+ if secondary_private_ip_address_count is not None:
+ try:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni["NetworkInterfaceId"],
+ SecondaryPrivateIpAddressCount=secondary_private_ip_address_count
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+
+ if secondary_private_ip_addresses is not None:
+ try:
+ connection.assign_private_ip_addresses(
+ NetworkInterfaceId=eni["NetworkInterfaceId"],
+ PrivateIpAddresses=secondary_private_ip_addresses
+ )
+ wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+
+ # Refresh the eni data
+ eni = describe_eni(connection, module, eni_id)
+ changed = True
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e,
+ "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address)
+ )
+
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def modify_eni(connection, module, eni):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ device_index = module.params.get("device_index")
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ source_dest_check = module.params.get("source_dest_check")
+ delete_on_termination = module.params.get("delete_on_termination")
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ allow_reassignment = module.params.get("allow_reassignment")
+ changed = False
+ tags = module.params.get("tags")
+ name = module.params.get("name")
+ purge_tags = module.params.get("purge_tags")
+
+ eni = uniquely_find_eni(connection, module, eni)
+ eni_id = eni["NetworkInterfaceId"]
+
+ try:
+ if description is not None:
+ if "Description" not in eni or eni["Description"] != description:
+ if not module.check_mode:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Description={'Value': description}
+ )
+ changed = True
+ if len(security_groups) > 0:
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True)
+ if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups):
+ if not module.check_mode:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Groups=groups
+ )
+ changed = True
+ if source_dest_check is not None:
+ if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check:
+ if not module.check_mode:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ SourceDestCheck={'Value': source_dest_check}
+ )
+ changed = True
+ if delete_on_termination is not None and "Attachment" in eni:
+ if eni["Attachment"]["DeleteOnTermination"] is not delete_on_termination:
+ if not module.check_mode:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"],
+ 'DeleteOnTermination': delete_on_termination}
+ )
+ if delete_on_termination:
+ waiter = "network_interface_delete_on_terminate"
+ else:
+ waiter = "network_interface_no_delete_on_terminate"
+ get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id])
+ changed = True
+
+ current_secondary_addresses = []
+ if "PrivateIpAddresses" in eni:
+ current_secondary_addresses = [i["PrivateIpAddress"] for i in eni["PrivateIpAddresses"] if not i["Primary"]]
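+        # For illustration: an ENI with primary 10.0.0.10 and secondaries
+        # 10.0.0.11 and 10.0.0.12 yields ['10.0.0.11', '10.0.0.12'] here.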
+
+ if secondary_private_ip_addresses is not None:
+ secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
+ if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
+ if not module.check_mode:
+ connection.unassign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+                        PrivateIpAddresses=secondary_addresses_to_remove,
+ )
+ wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id)
+ changed = True
+ secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
+ if secondary_addresses_to_add:
+ if not module.check_mode:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ PrivateIpAddresses=secondary_addresses_to_add,
+ AllowReassignment=allow_reassignment
+ )
+ wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id)
+ changed = True
+
+ if secondary_private_ip_address_count is not None:
+ current_secondary_address_count = len(current_secondary_addresses)
+ if secondary_private_ip_address_count > current_secondary_address_count:
+ if not module.check_mode:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count),
+ AllowReassignment=allow_reassignment
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ changed = True
+ elif secondary_private_ip_address_count < current_secondary_address_count:
+ # How many of these addresses do we want to remove
+ if not module.check_mode:
+ secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
+ connection.unassign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count]
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ changed = True
+
+ if attached is True:
+ if "Attachment" in eni and eni["Attachment"]["InstanceId"] != instance_id:
+ if not module.check_mode:
+ detach_eni(connection, eni, module)
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni_id,
+ )
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+ changed = True
+ if "Attachment" not in eni:
+ if not module.check_mode:
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni_id,
+ )
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+ changed = True
+
+ elif attached is False:
+ changed |= detach_eni(connection, eni, module)
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+
+ changed |= manage_tags(connection, module, eni, name, tags, purge_tags)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id))
+
+ eni = describe_eni(connection, module, eni_id)
+ if module.check_mode and changed:
+ module.exit_json(changed=changed, msg="Would have modified ENI: {0} if not in check mode".format(eni['NetworkInterfaceId']))
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def _wait_for_detach(connection, module, eni_id):
+ try:
+ get_waiter(connection, 'network_interface_available').wait(
+ NetworkInterfaceIds=[eni_id],
+ WaiterConfig={'Delay': 5, 'MaxAttempts': 80},
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Timeout waiting for ENI {0} to detach".format(eni_id))
+
+
+def delete_eni(connection, module):
+
+ eni = uniquely_find_eni(connection, module)
+ if not eni:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have deleted ENI if not in check mode.")
+
+ eni_id = eni["NetworkInterfaceId"]
+ force_detach = module.params.get("force_detach")
+
+ try:
+ if force_detach is True:
+ if "Attachment" in eni:
+ connection.detach_network_interface(
+ aws_retry=True,
+ AttachmentId=eni["Attachment"]["AttachmentId"],
+ Force=True,
+ )
+ _wait_for_detach(connection, module, eni_id)
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ changed = True
+ else:
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ changed = True
+
+ module.exit_json(changed=changed)
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id))
+
+
+def detach_eni(connection, eni, module):
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have detached ENI if not in check mode.")
+
+ eni_id = eni["NetworkInterfaceId"]
+
+ force_detach = module.params.get("force_detach")
+ if "Attachment" in eni:
+ connection.detach_network_interface(
+ aws_retry=True,
+ AttachmentId=eni["Attachment"]["AttachmentId"],
+ Force=force_detach,
+ )
+ _wait_for_detach(connection, module, eni_id)
+ return True
+
+ return False
+
+
+def describe_eni(connection, module, eni_id):
+ try:
+ eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])
+ if eni_result["NetworkInterfaces"]:
+ return eni_result["NetworkInterfaces"][0]
+ else:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id))
+
+
+def uniquely_find_eni(connection, module, eni=None):
+
+ if eni:
+ # In the case of create, eni_id will not be a param but we can still get the eni_id after creation
+ if "NetworkInterfaceId" in eni:
+ eni_id = eni["NetworkInterfaceId"]
+ else:
+ eni_id = None
+ else:
+ eni_id = module.params.get("eni_id")
+
+ private_ip_address = module.params.get('private_ip_address')
+ subnet_id = module.params.get('subnet_id')
+ instance_id = module.params.get('instance_id')
+ device_index = module.params.get('device_index')
+ attached = module.params.get('attached')
+ name = module.params.get("name")
+
+ filters = []
+
+ # proceed only if we're unequivocally specifying an ENI
+ if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
+ return None
+
+ if eni_id:
+ filters.append({'Name': 'network-interface-id',
+ 'Values': [eni_id]})
+
+ if private_ip_address and subnet_id and not filters:
+ filters.append({'Name': 'private-ip-address',
+ 'Values': [private_ip_address]})
+ filters.append({'Name': 'subnet-id',
+ 'Values': [subnet_id]})
+
+ if not attached and instance_id and device_index and not filters:
+ filters.append({'Name': 'attachment.instance-id',
+ 'Values': [instance_id]})
+ filters.append({'Name': 'attachment.device-index',
+ 'Values': [str(device_index)]})
+
+ if name and subnet_id and not filters:
+ filters.append({'Name': 'tag:Name',
+ 'Values': [name]})
+ filters.append({'Name': 'subnet-id',
+ 'Values': [subnet_id]})
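+    # For illustration: the branches above are tried in priority order - eni_id
+    # alone, then private_ip_address+subnet_id, then the attachment pair, then
+    # tag:Name+subnet_id; only the first match populates filters.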
+
+ if not filters:
+ return None
+
+ try:
+ eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)["NetworkInterfaces"]
+ if len(eni_result) == 1:
+ return eni_result[0]
+ else:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters))
+
+ return None
+
+
+def get_sec_group_list(groups):
+
+ # Build list of remote security groups
+ remote_security_groups = []
+ for group in groups:
+ remote_security_groups.append(group["GroupId"])
+
+ return remote_security_groups
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
+ return subnets["Subnets"][0]["VpcId"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id))
+
+
+def manage_tags(connection, module, eni, name, tags, purge_tags):
+ # Do not purge tags unless tags is not None
+ if tags is None:
+ purge_tags = False
+ tags = {}
+
+ if name:
+ tags['Name'] = name
+
+ eni_id = eni['NetworkInterfaceId']
+
+ changed = ensure_ec2_tags(connection, module, eni_id, tags=tags, purge_tags=purge_tags)
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ eni_id=dict(default=None, type='str'),
+ instance_id=dict(default=None, type='str'),
+ private_ip_address=dict(type='str'),
+ subnet_id=dict(type='str'),
+ description=dict(type='str'),
+ security_groups=dict(default=[], type='list', elements='str'),
+ device_index=dict(default=0, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+        force_detach=dict(default=False, type='bool'),
+ source_dest_check=dict(default=None, type='bool'),
+ delete_on_termination=dict(default=None, type='bool'),
+ secondary_private_ip_addresses=dict(default=None, type='list', elements='str'),
+ purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
+ secondary_private_ip_address_count=dict(default=None, type='int'),
+ allow_reassignment=dict(default=False, type='bool'),
+ attached=dict(default=None, type='bool'),
+ name=dict(default=None, type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
+ ],
+ required_if=([
+ ('attached', True, ['instance_id']),
+ ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
+ ]),
+ supports_check_mode=True,
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['IncorrectState'],
+ )
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+ state = module.params.get("state")
+
+ if state == 'present':
+ eni = uniquely_find_eni(connection, module)
+ if eni is None:
+ subnet_id = module.params.get("subnet_id")
+ if subnet_id is None:
+ module.fail_json(msg='subnet_id is required when creating a new ENI')
+
+ vpc_id = _get_vpc_id(connection, module, subnet_id)
+ create_eni(connection, vpc_id, module)
+ else:
+ modify_eni(connection, module, eni)
+
+ elif state == 'absent':
+ delete_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
new file mode 100644
index 00000000..bb8f4c14
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+version_added: 1.0.0
+short_description: Gather information about EC2 ENI interfaces in AWS
+description:
+ - Gather information about EC2 ENI interfaces in AWS.
+author:
+ - "Rob White (@wimnat)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI.
+ - This option is mutually exclusive of I(filters).
+ type: str
+ version_added: 1.3.0
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ - This option is mutually exclusive of I(eni_id).
+ type: dict
+ default: {}
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all ENIs
+- amazon.aws.ec2_eni_info:
+
+# Gather information about a particular ENI
+- amazon.aws.ec2_eni_info:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
+'''
+
+RETURN = '''
+network_interfaces:
+ description: List of matching elastic network interfaces.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: Info of associated elastic IP (EIP).
+ returned: When an ENI is associated with an EIP
+ type: dict
+ sample: {
+ allocation_id: "eipalloc-5sdf123",
+ association_id: "eipassoc-8sdf123",
+ ip_owner_id: "123456789012",
+ public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
+ public_ip: "52.1.0.63"
+ }
+ attachment:
+ description: Info about attached ec2 instance.
+ returned: When an ENI is attached to an ec2 instance
+ type: dict
+ sample: {
+ attach_time: "2017-08-05T15:25:47+00:00",
+ attachment_id: "eni-attach-149d21234",
+ delete_on_termination: false,
+ device_index: 1,
+ instance_id: "i-15b8d3cadbafa1234",
+ instance_owner_id: "123456789012",
+ status: "attached"
+ }
+ availability_zone:
+ description: Availability zone of ENI.
+ returned: always
+ type: str
+ sample: "us-east-1b"
+ description:
+ description: Description text for ENI.
+ returned: always
+ type: str
+ sample: "My favourite network interface"
+ groups:
+ description: List of attached security groups.
+ returned: always
+ type: list
+ sample: [
+ {
+ group_id: "sg-26d0f1234",
+ group_name: "my_ec2_security_group"
+ }
+ ]
+ id:
+ description: The id of the ENI (alias for network_interface_id).
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ interface_type:
+ description: Type of the network interface.
+ returned: always
+ type: str
+ sample: "interface"
+ ipv6_addresses:
+ description: List of IPv6 addresses for this interface.
+ returned: always
+ type: list
+ sample: []
+ mac_address:
+ description: MAC address of the network interface.
+ returned: always
+ type: str
+ sample: "0a:f8:10:2f:ab:a1"
+ name:
+ description: The Name tag of the ENI, often displayed in the AWS console as Name.
+ returned: When a Name tag has been set
+ type: str
+ version_added: 1.3.0
+ network_interface_id:
+ description: The id of the ENI.
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ owner_id:
+ description: AWS account id of the owner of the ENI.
+ returned: always
+ type: str
+ sample: "123456789012"
+ private_dns_name:
+ description: Private DNS name for the ENI.
+ returned: always
+ type: str
+ sample: "ip-172-16-1-180.ec2.internal"
+ private_ip_address:
+ description: Private IP address for the ENI.
+ returned: always
+ type: str
+ sample: "172.16.1.180"
+ private_ip_addresses:
+ description: List of private IP addresses attached to the ENI.
+ returned: always
+ type: list
+ sample: []
+ requester_id:
+ description: The ID of the entity that launched the ENI.
+ returned: always
+ type: str
+ sample: "AIDA12345EXAMPLE54321"
+ requester_managed:
+ description: Indicates whether the network interface is being managed by an AWS service.
+ returned: always
+ type: bool
+ sample: false
+ source_dest_check:
+ description: Indicates whether the network interface performs source/destination checking.
+ returned: always
+ type: bool
+ sample: false
+ status:
+ description: Indicates whether the network interface is attached to an instance.
+ returned: always
+ type: str
+ sample: "in-use"
+ subnet_id:
+ description: Subnet ID the ENI is in.
+ returned: always
+ type: str
+ sample: "subnet-7bbf01234"
+ tags:
+ description: Dictionary of tags added to the ENI.
+ returned: always
+ type: dict
+ sample: {}
+ version_added: 1.3.0
+ tag_set:
+ description: Dictionary of tags added to the ENI.
+ returned: always
+ type: dict
+ sample: {}
+ vpc_id:
+ description: ID of the VPC the network interface is part of.
+ returned: always
+ type: str
+ sample: "vpc-b3f1f123"
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_eni(connection, module):
+
+ params = {}
+ # Options are mutually exclusive
+ if module.params.get("eni_id"):
+ params['NetworkInterfaceIds'] = [module.params.get("eni_id")]
+ elif module.params.get("filters"):
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ else:
+ params['Filters'] = []
+
+ try:
+ network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces']
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+ module.exit_json(network_interfaces=[])
+ except (ClientError, NoCredentialsError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+ # Modify boto3 tags list to be ansible friendly dict and then camel_case
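+ # (tag keys keep their original case because 'Tags'/'TagSet' are passed in
+ # camel_dict_to_snake_dict's ignore_list)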
+ camel_network_interfaces = []
+ for network_interface in network_interfaces_result:
+ network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+ network_interface['Tags'] = network_interface['TagSet']
+ if 'Name' in network_interface['Tags']:
+ network_interface['Name'] = network_interface['Tags']['Name']
+ # Added id to interface info to be compatible with return values of ec2_eni module:
+ network_interface['Id'] = network_interface['NetworkInterfaceId']
+ camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet']))
+
+ module.exit_json(network_interfaces=camel_network_interfaces)
+
+
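+# NOTE: this helper appears to be left over from the old boto2-based module; it
+# uses attribute-style access and is never called by main(), which only invokes
+# list_eni() above.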
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if hasattr(interface, 'publicDnsName'):
+ interface_info['association'] = {'public_ip_address': interface.publicIp,
+ 'public_dns_name': interface.publicDnsName,
+ 'ip_owner_id': interface.ipOwnerId
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def main():
+ argument_spec = dict(
+ eni_id=dict(type='str'),
+ filters=dict(default=None, type='dict')
+ )
+ mutually_exclusive = [
+ ['eni_id', 'filters']
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ )
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
new file mode 100644
index 00000000..999b2b22
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
@@ -0,0 +1,2108 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance
+version_added: 1.0.0
+short_description: Create & manage EC2 instances
+description:
+ - Create and manage AWS EC2 instances.
+ - This module does not support creating
+ L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/).
+ - The M(amazon.aws.ec2_spot_instance) module can create and manage spot instances.
+author:
+ - Ryan Scott Brown (@ryansb)
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ - Mutually exclusive with I(exact_count).
+ type: list
+ elements: str
+ state:
+ description:
+ - Goal state for the instances.
+ - "I(state=present): ensures instances exist, but does not guarantee any state (e.g. running). Newly-launched instances will be run by EC2."
+ - "I(state=running): I(state=present) + ensures the instances are running"
+ - "I(state=started): I(state=running) + waits for EC2 status checks to report OK if I(wait=true)"
+ - "I(state=stopped): ensures an existing instance is stopped."
+ - "I(state=rebooted): convenience alias for I(state=stopped) immediately followed by I(state=running)"
+ - "I(state=restarted): convenience alias for I(state=stopped) immediately followed by I(state=started)"
+ - "I(state=terminated): ensures an existing instance is terminated."
+ - "I(state=absent): alias for I(state=terminated)"
+ choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
+ default: present
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the desired I(state) (use I(wait_timeout) to customize this).
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the instance to finish booting/terminating.
+ default: 600
+ type: int
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Only required when instance is not already present.
+ - If not specified, C(t2.micro) will be used.
+ - In a release after 2023-01-01 the default will be removed and either I(instance_type) or
+ I(launch_template) must be specified when launching an instance.
+ type: str
+ count:
+ description:
+ - Number of instances to launch.
+ - Setting this value will result in always launching new instances.
+ - Mutually exclusive with I(exact_count).
+ type: int
+ version_added: 2.2.0
+ exact_count:
+ description:
+ - An integer value indicating how many instances matching the I(filters) parameter should be running.
+ - Instances are either created or terminated based on this value.
+ - If termination takes place, the least recently created instances will be terminated, based on launch time.
+ - Mutually exclusive with I(count), I(instance_ids).
+ type: int
+ version_added: 2.2.0
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the EC2 instance.
+ type: str
+ aap_callback:
+ description:
+ - Preconfigured user-data to enable an instance to perform an Ansible Automation Platform
+ callback (Linux only).
+ - For Windows instances, to enable remote access via Ansible set I(windows) to C(true), and
+ optionally set an admin password.
+ - If using I(windows) and I(set_password), the callback to Ansible Automation Platform will not
+ be performed but the instance will be ready to receive WinRM connections from Ansible.
+ - Mutually exclusive with I(user_data).
+ type: dict
+ aliases: ['tower_callback']
+ suboptions:
+ windows:
+ description:
+ - Set I(windows=True) to use PowerShell instead of Bash for the callback script.
+ type: bool
+ default: False
+ set_password:
+ description:
+ - Optional admin password to use if I(windows=True).
+ type: str
+ tower_address:
+ description:
+ - IP address or DNS name of Tower server. Must be accessible via this address from the
+ VPC that this instance will be launched in.
+ - Required if I(windows=False).
+ type: str
+ job_template_id:
+ description:
+ - Either the integer ID of the Tower Job Template, or the name.
+ Using a name for the job template is not supported by Ansible Tower prior to version
+ 3.2.
+ - Required if I(windows=False).
+ type: str
+ host_config_key:
+ description:
+ - Host configuration secret key generated by the Tower job template.
+ - Required if I(windows=False).
+ type: str
+ image:
+ description:
+ - An image to use for the instance. The M(amazon.aws.ec2_ami_info) module may be used to retrieve images.
+ One of I(image) or I(image_id) is required when the instance is not already present.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - The AMI ID.
+ type: str
+ ramdisk:
+ description:
+ - Overrides the AMI's default ramdisk ID.
+ type: str
+ kernel:
+ description:
+ - A string AKI to override the AMI kernel.
+ type: str
+ image_id:
+ description:
+ - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when the instance is not already present.
+ - This is an alias for I(image.id).
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs or names (strings).
+ - Mutually exclusive with I(security_group).
+ type: list
+ elements: str
+ security_group:
+ description:
+ - A security group ID or name.
+ - Mutually exclusive with I(security_groups).
+ type: str
+ name:
+ description:
+ - The Name tag for the instance.
+ type: str
+ vpc_subnet_id:
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ - If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC.
+ aliases: ['subnet_id']
+ type: str
+ network:
+ description:
+ - Either a dictionary containing the key C(interfaces) corresponding to a list of network interface IDs or
+ containing specifications for a single network interface.
+ - Use the M(amazon.aws.ec2_eni) module to create ENIs with special settings.
+ type: dict
+ suboptions:
+ interfaces:
+ description:
+ - A list of ENI IDs (strings) or a list of objects containing the key I(id).
+ type: list
+ elements: str
+ assign_public_ip:
+ description:
+ - When C(true) assigns a public IP address to the interface.
+ type: bool
+ private_ip_address:
+ description:
+ - An IPv4 address to assign to the interface.
+ type: str
+ ipv6_addresses:
+ description:
+ - A list of IPv6 addresses to assign to the network interface.
+ type: list
+ elements: str
+ source_dest_check:
+ description:
+ - Controls whether source/destination checking is enabled on the interface.
+ type: bool
+ description:
+ description:
+ - A description for the network interface.
+ type: str
+ private_ip_addresses:
+ description:
+ - A list of IPv4 addresses to assign to the network interface.
+ type: list
+ elements: str
+ subnet_id:
+ description:
+ - The subnet to connect the network interface to.
+ type: str
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is
+ terminated.
+ type: bool
+ device_index:
+ description:
+ - The index of the interface to modify.
+ type: int
+ groups:
+ description:
+ - A list of security group IDs to attach to the interface.
+ type: list
+ elements: str
+ volumes:
+ description:
+ - A list of block device mappings. By default this will always use the AMI root device, so the volumes option is primarily for adding more storage.
+ - A mapping contains the (optional) keys C(device_name), C(virtual_name), C(ebs.volume_type), C(ebs.volume_size), C(ebs.kms_key_id),
+ C(ebs.snapshot_id), C(ebs.iops), and C(ebs.delete_on_termination).
+ - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ type: list
+ elements: dict
+ launch_template:
+ description:
+ - The EC2 launch template to base instance configuration on.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - The ID of the launch template (optional if name is specified).
+ type: str
+ name:
+ description:
+ - The pretty name of the launch template (optional if id is specified).
+ type: str
+ version:
+ description:
+ - The specific version of the launch template to use. If unspecified, the template default is chosen.
+ type: str
+ key_name:
+ description:
+ - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
+ - Use M(amazon.aws.ec2_key) to manage SSH keys.
+ type: str
+ availability_zone:
+ description:
+ - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
+ - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
+ type: str
+ instance_initiated_shutdown_behavior:
+ description:
+ - Whether to stop or terminate an instance upon shutdown.
+ choices: ['stop', 'terminate']
+ type: str
+ tenancy:
+ description:
+ - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ choices: ['dedicated', 'default']
+ type: str
+ termination_protection:
+ description:
+ - Whether to enable termination protection.
+ - This module will not terminate an instance with termination protection active, it must be turned off first.
+ type: bool
+ hibernation_options:
+ description:
+ - Indicates whether an instance is enabled for hibernation.
+ Refer to U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html)
+ for hibernation prerequisites.
+ type: bool
+ default: False
+ version_added: 5.0.0
+ cpu_credit_specification:
+ description:
+ - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
+ - Choose C(unlimited) to enable buying additional CPU credits.
+ choices: ['unlimited', 'standard']
+ type: str
+ cpu_options:
+ description:
+ - Reduce the number of vCPU exposed to the instance.
+ - These parameters can only be set at instance launch. The two suboptions I(threads_per_core) and I(core_count) are mandatory.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
+ type: dict
+ suboptions:
+ threads_per_core:
+ description:
+ - Select the number of threads per core to enable. Disables or enables Intel Hyper-Threading.
+ choices: [1, 2]
+ required: true
+ type: int
+ core_count:
+ description:
+ - Set the number of cores to enable.
+ required: true
+ type: int
+ detailed_monitoring:
+ description:
+ - Whether to allow detailed CloudWatch metrics to be collected, enabling more detailed alerting.
+ type: bool
+ ebs_optimized:
+ description:
+ - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ type: bool
+ filters:
+ description:
+ - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
+ consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
+ for possible filters. Filter names and values are case sensitive.
+ - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
+ subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
+ type: dict
+ iam_instance_profile:
+ description:
+ - The ARN or name of an EC2-enabled IAM instance profile to be used.
+ - If a name is not provided in ARN format then the ListInstanceProfiles permission must also be granted.
+ U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html)
+ - If no full ARN is provided, the role with a matching name will be used from the active AWS account.
+ type: str
+ aliases: ['instance_role']
+ placement_group:
+ description:
+ - The placement group that needs to be assigned to the instance.
+ type: str
+ metadata_options:
+ description:
+ - Modify the metadata options for the instance.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) for more information.
+ - The two suboptions I(http_endpoint) and I(http_tokens) are supported.
+ type: dict
+ version_added: 2.0.0
+ suboptions:
+ http_endpoint:
+ description:
+ - Enables or disables the HTTP metadata endpoint on instances.
+ - If set to C(disabled), the instance metadata will not be accessible.
+ choices: [enabled, disabled]
+ default: enabled
+ type: str
+ http_tokens:
+ description:
+ - Set the state of token usage for instance metadata requests.
+ - If the state is optional (v1 and v2), instance metadata can be retrieved with or without a signed token header on request.
+ - If the state is required (v2), a signed token header must be sent with any instance metadata retrieval requests.
+ choices: [optional, required]
+ default: optional
+ type: str
+ http_put_response_hop_limit:
+ version_added: 4.0.0
+ type: int
+ description:
+ - The desired HTTP PUT response hop limit for instance metadata requests.
+ - The larger the number, the further instance metadata requests can travel.
+ default: 1
+ http_protocol_ipv6:
+ version_added: 4.0.0
+ type: str
+ description:
+ - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)).
+ - Requires botocore >= 1.21.29.
+ choices: [enabled, disabled]
+ default: 'disabled'
+ instance_metadata_tags:
+ version_added: 4.0.0
+ type: str
+ description:
+ - Whether the instance tags are available (C(enabled)) via the metadata endpoint or not (C(disabled)).
+ - Requires botocore >= 1.23.30.
+ choices: [enabled, disabled]
+ default: 'disabled'
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Terminate every running instance in a region. Use with EXTREME caution.
+ amazon.aws.ec2_instance:
+ state: absent
+ filters:
+ instance-state-name: running
+
+- name: restart a particular instance by its ID
+ amazon.aws.ec2_instance:
+ state: restarted
+ instance_ids:
+ - i-12345678
+
+- name: start an instance with a public IP address
+ amazon.aws.ec2_instance:
+ name: "public-compute-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: c5.large
+ security_group: default
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ tags:
+ Environment: Testing
+
+- name: start an instance and Add EBS
+ amazon.aws.ec2_instance:
+ name: "public-withebs-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t2.micro
+ key_name: "prod-ssh-key"
+ security_group: default
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ volume_size: 16
+ delete_on_termination: true
+
+- name: start an instance and Add EBS volume from a snapshot
+ amazon.aws.ec2_instance:
+ name: "public-withebs-instance"
+ instance_type: t2.micro
+ image_id: ami-1234567890
+ vpc_subnet_id: subnet-5ca1ab1e
+ volumes:
+ - device_name: /dev/sda2
+ ebs:
+ snapshot_id: snap-1234567890
+
+- name: start an instance with a cpu_options
+ amazon.aws.ec2_instance:
+ name: "public-cpuoption-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ tags:
+ Environment: Testing
+ instance_type: c4.large
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+
+- name: start an instance and have it begin a Tower callback on boot
+ amazon.aws.ec2_instance:
+ name: "tower-callback-test"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ security_group: default
+ tower_callback:
+ # IP or hostname of tower server
+ tower_address: 1.2.3.4
+ job_template_id: 876
+ host_config_key: '[secret config key goes here]'
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ cpu_credit_specification: unlimited
+ tags:
+ SomeThing: "A value"
+
+- name: start an instance with ENI (An existing ENI ID is required)
+ amazon.aws.ec2_instance:
+ name: "public-eni-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ network:
+ interfaces:
+ - id: "eni-12345"
+ tags:
+ Env: "eni_on"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ instance_type: t2.micro
+ image_id: ami-123456
+
+- name: add second ENI interface
+ amazon.aws.ec2_instance:
+ name: "public-eni-instance"
+ network:
+ interfaces:
+ - id: "eni-12345"
+ - id: "eni-67890"
+ image_id: ami-123456
+ tags:
+ Env: "eni_on"
+ instance_type: t2.micro
+
+- name: start an instance with metadata options
+ amazon.aws.ec2_instance:
+ name: "public-metadataoptions-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t3.small
+ image_id: ami-123456
+ tags:
+ Environment: Testing
+ metadata_options:
+ http_endpoint: enabled
+ http_tokens: optional
+
+# ensure number of instances running with a tag matches exact_count
+- name: start multiple instances
+ amazon.aws.ec2_instance:
+ instance_type: t3.small
+ image_id: ami-123456
+ exact_count: 5
+ region: us-east-2
+ vpc_subnet_id: subnet-0123456
+ network:
+ assign_public_ip: true
+ security_group: default
+ tags:
+ foo: bar
+
+# launches multiple instances - specific number of instances
+- name: start specific number of multiple instances
+ amazon.aws.ec2_instance:
+ instance_type: t3.small
+ image_id: ami-123456
+ count: 3
+ region: us-east-2
+ network:
+ assign_public_ip: true
+ security_group: default
+ vpc_subnet_id: subnet-0123456
+ state: present
+ tags:
+ foo: bar
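+
+# Launch from an existing launch template (an illustrative sketch; the
+# template name and subnet are placeholders). An explicit instance_type
+# overrides the value stored in the template.
+- name: start an instance from a launch template
+ amazon.aws.ec2_instance:
+ name: "template-based-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ launch_template:
+ name: "my-launch-template"
+ instance_type: t3.micro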
+'''
+
+RETURN = '''
+instances:
+ description: A list of EC2 instances.
+ returned: when I(wait=true)
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume.
+ returned: always
+ type: str
+ sample: vol-12345678
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::123456789012:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile.
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IPv4 associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance.
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ network.source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+'''
+
+from collections import namedtuple
+import string
+import textwrap
+import time
+import uuid
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import parse_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tower import tower_callback_script
+
+module = None
+client = None
+
+
+def build_volume_spec(params):
+ volumes = params.get('volumes') or []
+ for volume in volumes:
+ if 'ebs' in volume:
+ for int_value in ['volume_size', 'iops']:
+ if int_value in volume['ebs']:
+ volume['ebs'][int_value] = int(volume['ebs'][int_value])
+ if 'volume_type' in volume['ebs'] and volume['ebs']['volume_type'] == 'gp3':
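+ # gp3 volumes take explicit IOPS/throughput values; fall back to the AWS
+ # baseline (3000 IOPS, 125 MiB/s) when the user omits them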
+ if not volume['ebs'].get('iops'):
+ volume['ebs']['iops'] = 3000
+ if 'throughput' in volume['ebs']:
+ volume['ebs']['throughput'] = int(volume['ebs']['throughput'])
+ else:
+ volume['ebs']['throughput'] = 125
+
+ return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
+
+
+def add_or_update_instance_profile(instance, desired_profile_name):
+ instance_profile_setting = instance.get('IamInstanceProfile')
+ if instance_profile_setting and desired_profile_name:
+ if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
+ # great, the profile we asked for is what's there
+ return False
+ else:
+ desired_arn = determine_iam_role(desired_profile_name)
+ if instance_profile_setting.get('Arn') == desired_arn:
+ return False
+
+ # update association
+ try:
+ association = client.describe_iam_instance_profile_associations(
+ aws_retry=True,
+ Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ # check for InvalidAssociationID.NotFound
+ module.fail_json_aws(e, "Could not find instance profile association")
+ try:
+ client.replace_iam_instance_profile_association(
+ aws_retry=True,
+ AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
+ )
+ return True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, "Could not associate instance profile")
+
+ if not instance_profile_setting and desired_profile_name:
+ # create association
+ try:
+ client.associate_iam_instance_profile(
+ aws_retry=True,
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
+ InstanceId=instance['InstanceId']
+ )
+ return True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, "Could not associate new instance profile")
+
+ return False
+
+
+def build_network_spec(params):
+ """
+ Returns list of interfaces [complex]
+ Interface type: {
+ 'AssociatePublicIpAddress': True|False,
+ 'DeleteOnTermination': True|False,
+ 'Description': 'string',
+ 'DeviceIndex': 123,
+ 'Groups': [
+ 'string',
+ ],
+ 'Ipv6AddressCount': 123,
+ 'Ipv6Addresses': [
+ {
+ 'Ipv6Address': 'string'
+ },
+ ],
+ 'NetworkInterfaceId': 'string',
+ 'PrivateIpAddress': 'string',
+ 'PrivateIpAddresses': [
+ {
+ 'Primary': True|False,
+ 'PrivateIpAddress': 'string'
+ },
+ ],
+ 'SecondaryPrivateIpAddressCount': 123,
+ 'SubnetId': 'string'
+ },
+ """
+
+ interfaces = []
+ network = params.get('network') or {}
+ if not network.get('interfaces'):
+ # they only specified one interface
+ spec = {
+ 'DeviceIndex': 0,
+ }
+ if network.get('assign_public_ip') is not None:
+ spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+ if params.get('vpc_subnet_id'):
+ spec['SubnetId'] = params['vpc_subnet_id']
+ else:
+ default_vpc = get_default_vpc()
+ if default_vpc is None:
+ module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+ else:
+ sub = get_default_subnet(default_vpc, availability_zone=module.params.get('availability_zone'))
+ spec['SubnetId'] = sub['SubnetId']
+
+ if network.get('private_ip_address'):
+ spec['PrivateIpAddress'] = network['private_ip_address']
+
+ if params.get('security_group') or params.get('security_groups'):
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=spec['SubnetId'],
+ )
+ spec['Groups'] = groups
+ if network.get('description') is not None:
+ spec['Description'] = network['description']
+ # TODO more special snowflake network things
+
+ return [spec]
+
+ # handle list of `network.interfaces` options
+ for idx, interface_params in enumerate(network.get('interfaces', [])):
+ spec = {
+ 'DeviceIndex': idx,
+ }
+
+ if isinstance(interface_params, string_types):
+ # naive case where user gave
+ # network_interfaces: [eni-1234, eni-4567, ....]
+ # put into normal data structure so we don't dupe code
+ interface_params = {'id': interface_params}
+
+ if interface_params.get('id') is not None:
+ # if an ID is provided, we don't want to set any other parameters.
+ spec['NetworkInterfaceId'] = interface_params['id']
+ interfaces.append(spec)
+ continue
+
+ spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+ if interface_params.get('ipv6_addresses'):
+ spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+ if interface_params.get('private_ip_address'):
+ spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+ if interface_params.get('description'):
+ spec['Description'] = interface_params.get('description')
+
+ if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+ spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+ elif not spec.get('SubnetId') and not interface_params.get('id'):
+ # TODO grab a subnet from default VPC
+ raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+ interfaces.append(spec)
+ return interfaces
+
+
+def warn_if_public_ip_assignment_changed(instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+ if assign_public_ip is None:
+ return
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = instance.get('PublicDnsName')
+ if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
+ module.warn(
+ "Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(
+ assign_public_ip, instance['InstanceId']))
+
+
+def warn_if_cpu_options_changed(instance):
+ # This is a non-modifiable attribute.
+ cpu_options = module.params.get('cpu_options')
+ if cpu_options is None:
+ return
+
+ # Check that the CpuOptions set are the same and warn if not
+ core_count_curr = instance['CpuOptions'].get('CoreCount')
+ core_count = cpu_options.get('core_count')
+ threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
+ threads_per_core = cpu_options.get('threads_per_core')
+ if core_count_curr != core_count:
+ module.warn(
+ "Unable to modify core_count from {0} to {1}. "
+ "Assigning a number of core is determinted during instance creation".format(
+ core_count_curr, core_count))
+
+ if threads_per_core_curr != threads_per_core:
+ module.warn(
+ "Unable to modify threads_per_core from {0} to {1}. "
+ "Assigning a number of threads per core is determined during instance creation.".format(
+ threads_per_core_curr, threads_per_core))
+
+
+def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None):
+
+ if subnet_id is not None:
+ try:
+ sub = client.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
+ except is_boto3_error_code('InvalidSubnetID.NotFound'):
+ module.fail_json(
+ "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
+ subnet_id
+ )
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+ parent_vpc_id = sub['Subnets'][0]['VpcId']
+
+ if group:
+ return get_ec2_security_group_ids_from_names(group, client, vpc_id=parent_vpc_id)
+ if groups:
+ return get_ec2_security_group_ids_from_names(groups, client, vpc_id=parent_vpc_id)
+ return []
+
+
+def build_userdata(params):
+ if params.get('user_data') is not None:
+ return {'UserData': to_native(params.get('user_data'))}
+ if params.get('aap_callback'):
+ userdata = tower_callback_script(
+ tower_address=params.get('aap_callback').get('tower_address'),
+ job_template_id=params.get('aap_callback').get('job_template_id'),
+ host_config_key=params.get('aap_callback').get('host_config_key'),
+ windows=params.get('aap_callback').get('windows'),
+ passwd=params.get('aap_callback').get('set_password'),
+ )
+ return {'UserData': userdata}
+ return {}
+
+
+def build_top_level_options(params):
+ spec = {}
+ if params.get('image_id'):
+ spec['ImageId'] = params['image_id']
+ elif isinstance(params.get('image'), dict):
+ image = params.get('image', {})
+ spec['ImageId'] = image.get('id')
+ if 'ramdisk' in image:
+ spec['RamdiskId'] = image['ramdisk']
+ if 'kernel' in image:
+ spec['KernelId'] = image['kernel']
+ if not spec.get('ImageId') and not params.get('launch_template'):
+ module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
+
+ if params.get('key_name') is not None:
+ spec['KeyName'] = params.get('key_name')
+
+ spec.update(build_userdata(params))
+
+ if params.get('launch_template') is not None:
+ spec['LaunchTemplate'] = {}
+ if not params.get('launch_template').get('id') and not params.get('launch_template').get('name'):
+ module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
+
+ if params.get('launch_template').get('id') is not None:
+ spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
+ if params.get('launch_template').get('name') is not None:
+ spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
+ if params.get('launch_template').get('version') is not None:
+ spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
+
+ if params.get('detailed_monitoring', False):
+ spec['Monitoring'] = {'Enabled': True}
+ if params.get('cpu_credit_specification') is not None:
+ spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
+ if params.get('tenancy') is not None:
+ spec['Placement'] = {'Tenancy': params.get('tenancy')}
+ if params.get('placement_group'):
+ if 'Placement' in spec:
+ spec['Placement']['GroupName'] = str(params.get('placement_group'))
+ else:
+ spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
+ if params.get('ebs_optimized') is not None:
+ spec['EbsOptimized'] = params.get('ebs_optimized')
+ if params.get('instance_initiated_shutdown_behavior'):
+ spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
+ if params.get('termination_protection') is not None:
+ spec['DisableApiTermination'] = params.get('termination_protection')
+ if params.get('hibernation_options') and params.get('volumes'):
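+ # hibernation requires encrypted volumes, so fail unless every EBS mapping
+ # in the request is marked encrypted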
+ for vol in params['volumes']:
+ if vol.get('ebs') and vol['ebs'].get('encrypted'):
+ spec['HibernationOptions'] = {'Configured': True}
+ else:
+ module.fail_json(
+ msg="Hibernation prerequisites not satisfied. Refer {0}".format(
+ "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html")
+ )
+ if params.get('cpu_options') is not None:
+ spec['CpuOptions'] = {}
+ spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
+ spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
+ if params.get('metadata_options'):
+ spec['MetadataOptions'] = {}
+ spec['MetadataOptions']['HttpEndpoint'] = params.get(
+ 'metadata_options').get('http_endpoint')
+ spec['MetadataOptions']['HttpTokens'] = params.get(
+ 'metadata_options').get('http_tokens')
+ spec['MetadataOptions']['HttpPutResponseHopLimit'] = params.get(
+ 'metadata_options').get('http_put_response_hop_limit')
+
+ if not module.botocore_at_least('1.23.30'):
+ # fail only if enabled is requested
+ if params.get('metadata_options').get('instance_metadata_tags') == 'enabled':
+ module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags')
+ else:
+ spec['MetadataOptions']['InstanceMetadataTags'] = params.get(
+ 'metadata_options').get('instance_metadata_tags')
+
+ if not module.botocore_at_least('1.21.29'):
+ # fail only if enabled is requested
+ if params.get('metadata_options').get('http_protocol_ipv6') == 'enabled':
+ module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6')
+ else:
+ spec['MetadataOptions']['HttpProtocolIpv6'] = params.get(
+ 'metadata_options').get('http_protocol_ipv6')
+
+ return spec
+
+
+def build_instance_tags(params, propagate_tags_to_volumes=True):
+ tags = params.get('tags') or {}
+ if params.get('name') is not None:
+ tags['Name'] = params.get('name')
+ specs = boto3_tag_specifications(tags, ['volume', 'instance'])
+ return specs
+
+
+def build_run_instance_spec(params):
+
+ spec = dict(
+ ClientToken=uuid.uuid4().hex,
+ MaxCount=1,
+ MinCount=1,
+ )
+ spec.update(**build_top_level_options(params))
+
+ spec['NetworkInterfaces'] = build_network_spec(params)
+ spec['BlockDeviceMappings'] = build_volume_spec(params)
+
+ tag_spec = build_instance_tags(params)
+ if tag_spec is not None:
+ spec['TagSpecifications'] = tag_spec
+
+ # IAM profile
+ if params.get('iam_instance_profile'):
+ spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('iam_instance_profile')))
+
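+ # 'to_launch' is presumably populated by the exact_count reconciliation logic
+ # elsewhere in the module (the delta between desired and running instances)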
+ if params.get('exact_count'):
+ spec['MaxCount'] = params.get('to_launch')
+ spec['MinCount'] = params.get('to_launch')
+
+ if params.get('count'):
+ spec['MaxCount'] = params.get('count')
+ spec['MinCount'] = params.get('count')
+
+ if not params.get('launch_template'):
+ spec['InstanceType'] = params['instance_type'] if params.get('instance_type') else 't2.micro'
+
+ if params.get('launch_template') and params.get('instance_type'):
+ spec['InstanceType'] = params['instance_type']
+
+ return spec
+
+
+def await_instances(ids, desired_module_state='present', force_wait=False):
+ if not module.params.get('wait', True) and not force_wait:
+ # the user asked not to wait for anything
+ return
+
+ if module.check_mode:
+ # In check mode, there is no change even if you wait.
+ return
+
+ # Map ansible state to boto3 waiter type
+ state_to_boto3_waiter = {
+ 'present': 'instance_exists',
+ 'started': 'instance_status_ok',
+ 'running': 'instance_running',
+ 'stopped': 'instance_stopped',
+ 'restarted': 'instance_status_ok',
+ 'rebooted': 'instance_running',
+ 'terminated': 'instance_terminated',
+ 'absent': 'instance_terminated',
+ }
+ if desired_module_state not in state_to_boto3_waiter:
+ module.fail_json(msg="Cannot wait for state {0}, invalid state".format(desired_module_state))
+ boto3_waiter_type = state_to_boto3_waiter[desired_module_state]
+ waiter = client.get_waiter(boto3_waiter_type)
+ try:
+ waiter.wait(
+ InstanceIds=ids,
+ WaiterConfig={
+ 'Delay': 15,
+ 'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
+ }
+ )
+ except botocore.exceptions.WaiterConfigError as e:
+ module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
+ to_native(e), ', '.join(ids), boto3_waiter_type))
+ except botocore.exceptions.WaiterError as e:
+ module.warn("Instances {0} took too long to reach state {1}. {2}".format(
+ ', '.join(ids), boto3_waiter_type, to_native(e)))
+
+
+def diff_instance_and_params(instance, params, skip=None):
+ """boto3 instance obj, module params"""
+
+ if skip is None:
+ skip = []
+
+ changes_to_apply = []
+ id_ = instance['InstanceId']
+
+ ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])
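+ # param_key: the module parameter name; instance_key: the key used both in the
+ # describe_instance_attribute response and the modify_instance_attribute call;
+ # attribute_name: the EC2 attribute name to describe; add_value: wraps the new
+ # value into the {'Value': ...} shape the modify call expects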
+
+ def value_wrapper(v):
+ return {'Value': v}
+
+ param_mappings = [
+ ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
+ ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
+ # user data is an immutable property
+ # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
+ ]
+
+ for mapping in param_mappings:
+ if params.get(mapping.param_key) is None:
+ continue
+ if mapping.instance_key in skip:
+ continue
+
+ try:
+ value = client.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_))
+ if value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
+ arguments = dict(
+ InstanceId=instance['InstanceId'],
+ # Attribute=mapping.attribute_name,
+ )
+ arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
+ changes_to_apply.append(arguments)
+
+ if params.get('security_group') or params.get('security_groups'):
+ try:
+ value = client.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_))
+ # managing security groups
+ if params.get('vpc_subnet_id'):
+ subnet_id = params.get('vpc_subnet_id')
+ else:
+ default_vpc = get_default_vpc()
+ if default_vpc is None:
+ module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.")
+ else:
+ sub = get_default_subnet(default_vpc)
+ subnet_id = sub['SubnetId']
+
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=subnet_id,
+ )
+ expected_groups = groups
+ instance_groups = [g['GroupId'] for g in value['Groups']]
+ if set(instance_groups) != set(expected_groups):
+ changes_to_apply.append(dict(
+ Groups=expected_groups,
+ InstanceId=instance['InstanceId']
+ ))
+
+ if (params.get('network') or {}).get('source_dest_check') is not None:
+ # network.source_dest_check is nested, so needs to be treated separately
+ check = bool(params.get('network').get('source_dest_check'))
+ if instance['SourceDestCheck'] != check:
+ changes_to_apply.append(dict(
+ InstanceId=instance['InstanceId'],
+ SourceDestCheck={'Value': check},
+ ))
+
+ return changes_to_apply
+
+
+def change_network_attachments(instance, params):
+ if (params.get('network') or {}).get('interfaces') is not None:
+ new_ids = []
+ for inty in params.get('network').get('interfaces'):
+ if isinstance(inty, dict) and 'id' in inty:
+ new_ids.append(inty['id'])
+ elif isinstance(inty, string_types):
+ new_ids.append(inty)
+ # network.interfaces can create the need to attach new interfaces
+ old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']]
+ to_attach = set(new_ids) - set(old_ids)
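+ # attach any newly requested ENIs; the device index mirrors the interface's
+ # position in the requested list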
+ for eni_id in to_attach:
+ try:
+ client.attach_network_interface(
+ aws_retry=True,
+ DeviceIndex=new_ids.index(eni_id),
+ InstanceId=instance['InstanceId'],
+ NetworkInterfaceId=eni_id,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not attach interface {0} to instance {1}".format(eni_id, instance['InstanceId']))
+ return bool(len(to_attach))
+ return False
+
+
+def find_instances(ids=None, filters=None):
+ sanitized_filters = dict()
+
+ if ids:
+ params = dict(InstanceIds=ids)
+ elif filters is None:
+ module.fail_json(msg="No filters provided when they were required")
+ else:
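+        # The EC2 API expects dashes in filter names, so translate snake_case
+        # keys; 'tag:' keys are passed through untouched because tag names may
+        # themselves contain underscores.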
+ for key in list(filters.keys()):
+ if not key.startswith("tag:"):
+ sanitized_filters[key.replace("_", "-")] = filters[key]
+ else:
+ sanitized_filters[key] = filters[key]
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+
+ try:
+ results = _describe_instances(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe instances")
+
+ retval = list(results)
+ return retval
+
+
+@AWSRetry.jittered_backoff()
+def _describe_instances(**params):
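+    # search() streams matches from every page; the JMESPath expression
+    # 'Reservations[].Instances[]' flattens reservations into a flat
+    # sequence of instance dicts.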
+ paginator = client.get_paginator('describe_instances')
+ return paginator.paginate(**params).search('Reservations[].Instances[]')
+
+
+def get_default_vpc():
+ try:
+ vpcs = client.describe_vpcs(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe default VPC")
+ if len(vpcs.get('Vpcs', [])):
+ return vpcs.get('Vpcs')[0]
+ return None
+
+
+def get_default_subnet(vpc, availability_zone=None):
+ try:
+ subnets = client.describe_subnets(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list({
+ 'vpc-id': vpc['VpcId'],
+ 'state': 'available',
+ 'default-for-az': 'true',
+ })
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId']))
+ if len(subnets.get('Subnets', [])):
+ if availability_zone is not None:
+ subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets'))
+ if availability_zone in subs_by_az:
+ return subs_by_az[availability_zone]
+
+ # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first
+ # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list
+ by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone'])
+ return by_az[0]
+ return None
+
+
+def ensure_instance_state(desired_module_state):
+ """
+ Sets return keys depending on the desired instance state
+ """
+ results = dict()
+ changed = False
+ if desired_module_state in ('running', 'started'):
+ _changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'), desired_module_state=desired_module_state)
+ changed |= bool(len(_changed))
+
+ if failed:
+ module.fail_json(
+ msg="Unable to start instances: {0}".format(failure_reason),
+ reboot_success=list(_changed),
+ reboot_failed=failed)
+
+ results = dict(
+ msg='Instances started',
+ start_success=list(_changed),
+ start_failed=[],
+            # Avoid breaking things: 'reboot' is the wrong key here but used to be returned
+ reboot_success=list(_changed),
+ reboot_failed=[],
+ changed=changed,
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif desired_module_state in ('restarted', 'rebooted'):
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-reboot.html
+ # The Ansible behaviour of issuing a stop/start has a minor impact on user billing
+ # This will need to be changelogged if we ever change to client.reboot_instance
+ _changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_module_state='stopped',
+ )
+
+ if failed:
+ module.fail_json(
+ msg="Unable to stop instances: {0}".format(failure_reason),
+ stop_success=list(_changed),
+ stop_failed=failed)
+
+ changed |= bool(len(_changed))
+ _changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_module_state=desired_module_state,
+ )
+ changed |= bool(len(_changed))
+
+ if failed:
+ module.fail_json(
+ msg="Unable to restart instances: {0}".format(failure_reason),
+ reboot_success=list(_changed),
+ reboot_failed=failed)
+
+ results = dict(
+ msg='Instances restarted',
+ reboot_success=list(_changed),
+ changed=changed,
+ reboot_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif desired_module_state in ('stopped',):
+ _changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_module_state=desired_module_state,
+ )
+ changed |= bool(len(_changed))
+
+ if failed:
+ module.fail_json(
+ msg="Unable to stop instances: {0}".format(failure_reason),
+ stop_success=list(_changed),
+ stop_failed=failed)
+
+ results = dict(
+ msg='Instances stopped',
+ stop_success=list(_changed),
+ changed=changed,
+ stop_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif desired_module_state in ('absent', 'terminated'):
+ terminated, terminate_failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_module_state=desired_module_state,
+ )
+
+ if terminate_failed:
+ module.fail_json(
+ msg="Unable to terminate instances: {0}".format(failure_reason),
+ terminate_success=list(terminated),
+ terminate_failed=terminate_failed)
+ results = dict(
+ msg='Instances terminated',
+ terminate_success=list(terminated),
+ changed=bool(len(terminated)),
+ terminate_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ return results
+
+
+def change_instance_state(filters, desired_module_state):
+
+ # Map ansible state to ec2 state
+ ec2_instance_states = {
+ 'present': 'running',
+ 'started': 'running',
+ 'running': 'running',
+ 'stopped': 'stopped',
+ 'restarted': 'running',
+ 'rebooted': 'running',
+ 'terminated': 'terminated',
+ 'absent': 'terminated',
+ }
+ desired_ec2_state = ec2_instance_states[desired_module_state]
+ changed = set()
+ instances = find_instances(filters=filters)
+ to_change = set(i['InstanceId'] for i in instances if i['State']['Name'] != desired_ec2_state)
+ unchanged = set()
+ failure_reason = ""
+
+ for inst in instances:
+ try:
+ if desired_ec2_state == 'terminated':
+                # Before terminating an instance we need it to leave the
+                # 'pending' or 'stopping' state (if it's in one of them)
+ if inst['State']['Name'] == 'stopping':
+ await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True)
+ elif inst['State']['Name'] == 'pending':
+ await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True)
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
+ # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+ resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['TerminatingInstances']:
+                    changed.add(i['InstanceId'])
+ if desired_ec2_state == 'stopped':
+                # Before stopping an instance we need it to leave the
+                # 'pending' state
+ if inst['State']['Name'] == 'pending':
+ await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True)
+ # Already moving to the relevant state
+ elif inst['State']['Name'] in ('stopping', 'stopped'):
+ unchanged.add(inst['InstanceId'])
+ continue
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+ resp = client.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['StoppingInstances']:
+                    changed.add(i['InstanceId'])
+ if desired_ec2_state == 'running':
+ if inst['State']['Name'] in ('pending', 'running'):
+ unchanged.add(inst['InstanceId'])
+ continue
+ elif inst['State']['Name'] == 'stopping':
+ await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True)
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = client.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['StartingInstances']:
+                    changed.add(i['InstanceId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ try:
+ failure_reason = to_native(e.message)
+ except AttributeError:
+ failure_reason = to_native(e)
+
+ if changed:
+ await_instances(ids=list(changed) + list(unchanged), desired_module_state=desired_module_state)
+
+ change_failed = list(to_change - changed)
+
+ if instances:
+ instances = find_instances(ids=list(i['InstanceId'] for i in instances))
+ return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+ instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+ instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {}))
+ return instance
+
+
+def determine_iam_role(name_or_arn):
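+    # Accepts either a bare instance profile name (e.g. 'my-profile') or a full
+    # instance-profile ARN; bare names are resolved to an ARN via the IAM API.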
+ result = parse_aws_arn(name_or_arn)
+ if result and result['service'] == 'iam' and result['resource'].startswith('instance-profile/'):
+ return name_or_arn
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return role['InstanceProfile']['Arn']
+ except is_boto3_error_code('NoSuchEntity') as e:
+ module.fail_json_aws(e, msg="Could not find iam_instance_profile {0}".format(name_or_arn))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="An error occurred while searching for iam_instance_profile {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def handle_existing(existing_matches, state):
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ name = module.params.get('name')
+
+    # 'Name' is a tag rather than a direct parameter, so we need to inject it
+    # into tags. When tags isn't explicitly passed, we treat it being unset as
+    # purge_tags == False so existing tags aren't removed.
+ if name:
+ if tags is None:
+ purge_tags = False
+ tags = {}
+ tags.update({'Name': name})
+
+ changed = False
+ all_changes = list()
+
+ for instance in existing_matches:
+ changed |= ensure_ec2_tags(client, module, instance['InstanceId'], tags=tags, purge_tags=purge_tags)
+ changes = diff_instance_and_params(instance, module.params)
+ for c in changes:
+ if not module.check_mode:
+ try:
+ client.modify_instance_attribute(aws_retry=True, **c)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c)))
+ all_changes.extend(changes)
+ changed |= bool(changes)
+ changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('iam_instance_profile'))
+ changed |= change_network_attachments(existing_matches[0], module.params)
+
+ altered = find_instances(ids=[i['InstanceId'] for i in existing_matches])
+ alter_config_result = dict(
+ changed=changed,
+ instances=[pretty_instance(i) for i in altered],
+ instance_ids=[i['InstanceId'] for i in altered],
+ changes=changes,
+ )
+
+ state_results = ensure_instance_state(state)
+ alter_config_result['changed'] |= state_results.pop('changed', False)
+ result = {**state_results, **alter_config_result}
+
+ return result
+
+
+def enforce_count(existing_matches, module, desired_module_state):
+ exact_count = module.params.get('exact_count')
+
+ try:
+ current_count = len(existing_matches)
+ if current_count == exact_count:
+ module.exit_json(
+ changed=False,
+ msg='{0} instances already running, nothing to do.'.format(exact_count)
+ )
+
+ elif current_count < exact_count:
+ to_launch = exact_count - current_count
+ module.params['to_launch'] = to_launch
+ # launch instances
+ try:
+ ensure_present(existing_matches=existing_matches, desired_module_state=desired_module_state)
+ except botocore.exceptions.ClientError as e:
+                module.fail_json_aws(e, msg='Unable to launch instances')
+ elif current_count > exact_count:
+ to_terminate = current_count - exact_count
+ # sort the instances from least recent to most recent based on launch time
+ existing_matches = sorted(existing_matches, key=lambda inst: inst['LaunchTime'])
+ # get the instance ids of instances with the count tag on them
+ all_instance_ids = [x['InstanceId'] for x in existing_matches]
+ terminate_ids = all_instance_ids[0:to_terminate]
+ if module.check_mode:
+            module.exit_json(changed=True, msg='Would have terminated the following instances if not in check mode: {0}'.format(terminate_ids))
+ # terminate instances
+ try:
+ client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
+ await_instances(terminate_ids, desired_module_state='terminated', force_wait=True)
+ except is_boto3_error_code('InvalidInstanceID.NotFound'):
+ pass
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+                module.fail_json_aws(e, msg='Unable to terminate instances')
+ module.exit_json(
+ changed=True,
+ msg='Successfully terminated instances.',
+ terminated_ids=terminate_ids,
+ )
+
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to enforce instance count")
+
+
+def ensure_present(existing_matches, desired_module_state):
+ tags = dict(module.params.get('tags') or {})
+ name = module.params.get('name')
+ if name:
+ tags['Name'] = name
+
+ try:
+ instance_spec = build_run_instance_spec(module.params)
+        # If check mode is enabled, stop here and report the spec that would have been launched.
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ spec=instance_spec,
+ msg='Would have launched instances if not in check_mode.',
+ )
+ instance_response = run_instances(**instance_spec)
+ instances = instance_response['Instances']
+ instance_ids = [i['InstanceId'] for i in instances]
+
+ # Wait for instances to exist in the EC2 API before
+ # attempting to modify them
+ await_instances(instance_ids, desired_module_state='present', force_wait=True)
+
+ for ins in instances:
+ # Wait for instances to exist (don't check state)
+ try:
+ AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['InvalidInstanceID.NotFound'],
+ )(
+ client.describe_instance_status
+ )(
+ InstanceIds=[ins['InstanceId']],
+ IncludeAllInstances=True,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance")
+ changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
+ for c in changes:
+ try:
+ client.modify_instance_attribute(aws_retry=True, **c)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
+
+ if not module.params.get('wait'):
+ module.exit_json(
+ changed=True,
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ await_instances(instance_ids, desired_module_state=desired_module_state)
+ instances = find_instances(ids=instance_ids)
+
+ module.exit_json(
+ changed=True,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create new EC2 instance")
+
+
+def run_instances(**instance_spec):
+ try:
+ return client.run_instances(aws_retry=True, **instance_spec)
+ except is_boto3_error_message('Invalid IAM Instance Profile ARN'):
+        # If the instance profile has just been created, it takes some time to become visible to EC2,
+        # so we wait 10 seconds and retry run_instances once
+ time.sleep(10)
+ return client.run_instances(aws_retry=True, **instance_spec)
+
+
+def build_filters():
+ filters = {
+ # all states except shutting-down and terminated
+ 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'],
+ }
+ if isinstance(module.params.get('instance_ids'), string_types):
+ filters['instance-id'] = [module.params.get('instance_ids')]
+ elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
+ filters['instance-id'] = module.params.get('instance_ids')
+ else:
+ if not module.params.get('vpc_subnet_id'):
+ if module.params.get('network'):
+ # grab AZ from one of the ENIs
+ ints = module.params.get('network').get('interfaces')
+ if ints:
+ filters['network-interface.network-interface-id'] = []
+ for i in ints:
+ if isinstance(i, dict):
+ i = i['id']
+ filters['network-interface.network-interface-id'].append(i)
+ else:
+ sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get('availability_zone'))
+ filters['subnet-id'] = sub['SubnetId']
+ else:
+ filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
+
+ if module.params.get('name'):
+ filters['tag:Name'] = [module.params.get('name')]
+ elif module.params.get('tags'):
+ name_tag = module.params.get('tags').get('Name', None)
+ if name_tag:
+ filters['tag:Name'] = [name_tag]
+
+ if module.params.get('image_id'):
+ filters['image-id'] = [module.params.get('image_id')]
+ elif (module.params.get('image') or {}).get('id'):
+ filters['image-id'] = [module.params.get('image', {}).get('id')]
+ return filters
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
+ wait=dict(default=True, type='bool'),
+ wait_timeout=dict(default=600, type='int'),
+ count=dict(type='int'),
+ exact_count=dict(type='int'),
+ image=dict(type='dict'),
+ image_id=dict(type='str'),
+ instance_type=dict(type='str'),
+ user_data=dict(type='str'),
+ aap_callback=dict(
+ type='dict', aliases=['tower_callback'],
+ required_if=[
+ ('windows', False, ('tower_address', 'job_template_id', 'host_config_key',), False),
+ ],
+ options=dict(
+ windows=dict(type='bool', default=False),
+ set_password=dict(type='str', no_log=True),
+ tower_address=dict(type='str'),
+ job_template_id=dict(type='str'),
+ host_config_key=dict(type='str', no_log=True),
+ ),
+ ),
+ ebs_optimized=dict(type='bool'),
+ vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
+ availability_zone=dict(type='str'),
+ security_groups=dict(default=[], type='list', elements='str'),
+ security_group=dict(type='str'),
+ iam_instance_profile=dict(type='str', aliases=['instance_role']),
+ name=dict(type='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ filters=dict(type='dict', default=None),
+ launch_template=dict(type='dict'),
+ key_name=dict(type='str'),
+ cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
+ cpu_options=dict(type='dict', options=dict(
+ core_count=dict(type='int', required=True),
+ threads_per_core=dict(type='int', choices=[1, 2], required=True)
+ )),
+ tenancy=dict(type='str', choices=['dedicated', 'default']),
+ placement_group=dict(type='str'),
+ instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
+ termination_protection=dict(type='bool'),
+ hibernation_options=dict(type='bool', default=False),
+ detailed_monitoring=dict(type='bool'),
+ instance_ids=dict(default=[], type='list', elements='str'),
+ network=dict(default=None, type='dict'),
+ volumes=dict(default=None, type='list', elements='dict'),
+ metadata_options=dict(
+ type='dict',
+ options=dict(
+ http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'),
+ http_put_response_hop_limit=dict(type='int', default=1),
+ http_tokens=dict(choices=['optional', 'required'], default='optional'),
+ http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'),
+ instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'),
+ )
+ ),
+ )
+ # running/present are synonyms
+ # as are terminated/absent
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['security_groups', 'security_group'],
+ ['availability_zone', 'vpc_subnet_id'],
+ ['aap_callback', 'user_data'],
+ ['image_id', 'image'],
+ ['exact_count', 'count'],
+ ['exact_count', 'instance_ids'],
+ ],
+ supports_check_mode=True
+ )
+
+ if not module.params.get('instance_type') and not module.params.get('launch_template'):
+ if module.params.get('state') not in ('absent', 'stopped'):
+ if module.params.get('count') or module.params.get('exact_count'):
+                module.deprecate("The default value for instance_type has been deprecated; in the future you must set an instance_type or a launch_template",
+                                 date='2023-01-01', collection_name='amazon.aws')
+ result = dict()
+
+ if module.params.get('network'):
+ if module.params.get('network').get('interfaces'):
+ if module.params.get('security_group'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
+ if module.params.get('security_groups'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
+
+ state = module.params.get('state')
+
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=[
+ 'IncorrectState',
+            'InsufficientInstanceCapacity',
+ ]
+ )
+ client = module.client('ec2', retry_decorator=retry_decorator)
+
+ if module.params.get('filters') is None:
+ module.params['filters'] = build_filters()
+
+ existing_matches = find_instances(filters=module.params.get('filters'))
+
+ if state in ('terminated', 'absent'):
+ if existing_matches:
+ result = ensure_instance_state(state)
+ else:
+ result = dict(
+ msg='No matching instances found',
+ changed=False,
+ )
+ elif module.params.get('exact_count'):
+ enforce_count(existing_matches, module, desired_module_state=state)
+ elif existing_matches and not module.params.get('count'):
+ for match in existing_matches:
+ warn_if_public_ip_assignment_changed(match)
+ warn_if_cpu_options_changed(match)
+ result = handle_existing(existing_matches, state)
+ else:
+ result = ensure_present(existing_matches=existing_matches, desired_module_state=state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
new file mode 100644
index 00000000..bcfa55e6
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
@@ -0,0 +1,587 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance_info
+version_added: 1.0.0
+short_description: Gather information about EC2 instances in AWS
+description:
+  - Gather information about EC2 instances in AWS.
+author:
+ - Michael Schuett (@michaeljs1990)
+ - Rob White (@wimnat)
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ required: false
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+ minimum_uptime:
+ description:
+      - Minimum running uptime of the instances, in minutes. For example, if I(minimum_uptime) is C(60), return only instances that have run for more than 60 minutes.
+ required: false
+ aliases: ['uptime']
+ type: int
+
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all instances
+ amazon.aws.ec2_instance_info:
+
+- name: Gather information about all instances in AZ ap-southeast-2a
+ amazon.aws.ec2_instance_info:
+ filters:
+ availability-zone: ap-southeast-2a
+
+- name: Gather information about a particular instance using ID
+ amazon.aws.ec2_instance_info:
+ instance_ids:
+ - i-12345678
+
+- name: Gather information about any instance with a tag key Name and value Example
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any instance in states "shutting-down", "stopping", "stopped"
+ amazon.aws.ec2_instance_info:
+ filters:
+ instance-state-name: [ "shutting-down", "stopping", "stopped" ]
+
+- name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes
+ amazon.aws.ec2_instance_info:
+ region: "{{ ec2_region }}"
+ uptime: 60
+ filters:
+ "tag:Name": "RHEL-*"
+      instance-state-name: [ "running" ]
+ register: ec2_node_info
+
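+# A further illustrative example (the VPC ID is a placeholder); all listed
+# filters must match for an instance to be returned.
+- name: Gather information about stopped instances in a specific VPC
+  amazon.aws.ec2_instance_info:
+    filters:
+      vpc-id: vpc-0011223344
+      instance-state-name: stopped
+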
+'''
+
+RETURN = r'''
+instances:
+ description: A list of ec2 instances.
+ returned: always
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume.
+ returned: always
+ type: str
+ sample: vol-12345678
+ cpu_options:
+ description: The CPU options set for the instance.
+ returned: always
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ returned: always
+ type: int
+ sample: 1
+ threads_per_core:
+            description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+ returned: always
+ type: int
+ sample: 1
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::123456789012:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile.
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+          description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance.
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+    vpc_id:
+      description: The ID of the VPC the instance is in.
+      returned: always
+      type: str
+      sample: vpc-0011223344
+'''
+
+import datetime
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff()
+def _describe_instances(connection, **params):
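+    # build_full_result() walks every page and aggregates them into a single
+    # response dict, so callers see one complete 'Reservations' list.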
+ paginator = connection.get_paginator('describe_instances')
+ return paginator.paginate(**params).build_full_result()
+
+
+def list_ec2_instances(connection, module):
+
+ instance_ids = module.params.get("instance_ids")
+ uptime = module.params.get('minimum_uptime')
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ reservations = _describe_instances(connection, InstanceIds=instance_ids, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to list ec2 instances")
+
+ instances = []
+
+ if uptime:
+        timedelta = int(uptime)
+ oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta)
+ # Get instances from reservations
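+        # LaunchTime is timezone-aware (UTC); strip tzinfo so it can be
+        # compared against the naive utcnow() timestamp above.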
+ for reservation in reservations['Reservations']:
+ instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time]
+ else:
+ for reservation in reservations['Reservations']:
+ instances = instances + reservation['Instances']
+
+    # Turn the boto3 result into ansible-friendly snake_case names
+ snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+    # Turn the boto3 result into an ansible-friendly tag dictionary
+ for instance in snaked_instances:
+ instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+ module.exit_json(instances=snaked_instances)
+
+
+def main():
+
+ argument_spec = dict(
+ minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']),
+ instance_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['instance_ids', 'filters']
+ ],
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
new file mode 100644
index 00000000..5d849802
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_key
+version_added: 1.0.0
+short_description: Create or delete an EC2 key pair
+description:
+ - Create or delete an EC2 key pair.
+options:
+ name:
+ description:
+ - Name of the key pair.
+ required: true
+ type: str
+ key_material:
+ description:
+ - Public key material.
+ required: false
+ type: str
+ force:
+ description:
+ - Force overwrite of already existing key pair if key has changed.
+ required: false
+ default: true
+ type: bool
+ state:
+ description:
+ - Create or delete keypair.
+ required: false
+ choices: [ present, absent ]
+ default: 'present'
+ type: str
+ key_type:
+ description:
+ - The type of key pair to create.
+ - Note that ED25519 keys are not supported for Windows instances,
+ EC2 Instance Connect, and EC2 Serial Console.
+ - By default Amazon will create an RSA key.
+ - Mutually exclusive with parameter I(key_material).
+ - Requires at least botocore version 1.21.23.
+ type: str
+ choices:
+ - rsa
+ - ed25519
+ version_added: 3.1.0
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+author:
+ - "Vincent Viallet (@zbal)"
+ - "Prasad Katti (@prasadkatti)"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a new EC2 key pair, returns generated private key
+ amazon.aws.ec2_key:
+ name: my_keypair
+
+- name: create key pair using provided key_material
+ amazon.aws.ec2_key:
+ name: my_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+
+- name: create key pair using key_material obtained using 'file' lookup plugin
+ amazon.aws.ec2_key:
+ name: my_keypair
+ key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
+
+- name: Create ED25519 key pair
+ amazon.aws.ec2_key:
+ name: my_keypair
+ key_type: ed25519
+
+# try creating a key pair with the name of an already existing keypair
+# but don't overwrite it even if the key is different (force=false)
+- name: try creating a key pair with name of an already existing keypair
+ amazon.aws.ec2_key:
+ name: my_existing_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+ force: false
+
+- name: remove key pair by name
+ amazon.aws.ec2_key:
+ name: my_keypair
+ state: absent
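+
+# A hedged sketch: persist the generated private key locally. The registered
+# variable name and destination path here are illustrative only.
+- name: create a key pair and capture the result
+  amazon.aws.ec2_key:
+    name: my_keypair
+  register: my_key
+
+- name: save the private key when AWS generated one
+  ansible.builtin.copy:
+    content: "{{ my_key.key.private_key }}"
+    dest: /tmp/my_keypair.pem
+    mode: '0600'
+  when: my_key.key.private_key is defined
+  no_log: true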
+'''
+
+RETURN = '''
+changed:
+ description: whether a keypair was created/deleted
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: short message describing the action taken
+ returned: always
+ type: str
+ sample: key pair created
+key:
+ description: details of the keypair (this is set to null when state is absent)
+ returned: always
+ type: complex
+ contains:
+ fingerprint:
+ description: fingerprint of the key
+ returned: when state is present
+ type: str
+ sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
+ name:
+ description: name of the keypair
+ returned: when state is present
+ type: str
+ sample: my_keypair
+ id:
+ description: id of the keypair
+ returned: when state is present
+ type: str
+ sample: key-123456789abc
+ tags:
+ description: a dictionary representing the tags attached to the key pair
+ returned: when state is present
+ type: dict
+ sample: '{"my_key": "my value"}'
+ private_key:
+ description: private key of a newly created keypair
+ returned: when a new keypair is created by AWS (key_material is not provided)
+ type: str
+ sample: '-----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKC...
+ -----END RSA PRIVATE KEY-----'
+ type:
+ description: type of a newly created keypair
+ returned: when a new keypair is created by AWS
+ type: str
+ sample: rsa
+ version_added: 3.1.0
+'''
+
+import uuid
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def extract_key_data(key, key_type=None):
+
+ data = {
+ 'name': key['KeyName'],
+ 'fingerprint': key['KeyFingerprint'],
+ 'id': key['KeyPairId'],
+ 'tags': {},
+ }
+ if 'Tags' in key:
+ data['tags'] = boto3_tag_list_to_ansible_dict(key['Tags'])
+ if 'KeyMaterial' in key:
+ data['private_key'] = key['KeyMaterial']
+ if 'KeyType' in key:
+ data['type'] = key['KeyType']
+ elif key_type:
+ data['type'] = key_type
+ return data
+
+
+def get_key_fingerprint(module, ec2_client, key_material):
+ '''
+ EC2's fingerprints are non-trivial to generate, so push this key
+ to a temporary name and make ec2 calculate the fingerprint for us.
+ http://blog.jbrowne.com/?p=23
+ https://forums.aws.amazon.com/thread.jspa?messageID=352828
+ '''
+
+ # find an unused name
+ name_in_use = True
+ while name_in_use:
+ random_name = "ansible-" + str(uuid.uuid4())
+ name_in_use = find_key_pair(module, ec2_client, random_name)
+
+ temp_key = _import_key_pair(module, ec2_client, random_name, key_material)
+ delete_key_pair(module, ec2_client, random_name, finish_task=False)
+ return temp_key['KeyFingerprint']
+
+
+def find_key_pair(module, ec2_client, name):
+
+ try:
+ key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name])['KeyPairs'][0]
+ except is_boto3_error_code('InvalidKeyPair.NotFound'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(err, msg="error finding keypair")
+ except IndexError:
+ key = None
+ return key
+
+
+def create_key_pair(module, ec2_client, name, key_material, force, key_type):
+
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ key = find_key_pair(module, ec2_client, name)
+ tag_spec = boto3_tag_specifications(tags, ['key-pair'])
+ changed = False
+ if key:
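+        # An existing key pair is replaced only when force is set and the new
+        # material's fingerprint differs, or when a different key_type is
+        # requested; otherwise only the tags are reconciled below.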
+ if key_material and force:
+ new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
+ if key['KeyFingerprint'] != new_fingerprint:
+ changed = True
+ if not module.check_mode:
+ delete_key_pair(module, ec2_client, name, finish_task=False)
+ key = _import_key_pair(module, ec2_client, name, key_material, tag_spec)
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair updated")
+ if key_type and key_type != key['KeyType']:
+ changed = True
+ if not module.check_mode:
+ delete_key_pair(module, ec2_client, name, finish_task=False)
+ key = _create_key_pair(module, ec2_client, name, tag_spec, key_type)
+ key_data = extract_key_data(key, key_type)
+ module.exit_json(changed=True, key=key_data, msg="key pair updated")
+ changed |= ensure_ec2_tags(ec2_client, module, key['KeyPairId'], tags=tags, purge_tags=purge_tags)
+ key = find_key_pair(module, ec2_client, name)
+ key_data = extract_key_data(key)
+ module.exit_json(changed=changed, key=key_data, msg="key pair already exists")
+ else:
+ # key doesn't exist, create it now
+ key_data = None
+ if not module.check_mode:
+ if key_material:
+ key = _import_key_pair(module, ec2_client, name, key_material, tag_spec)
+ else:
+ key = _create_key_pair(module, ec2_client, name, tag_spec, key_type)
+ key_data = extract_key_data(key, key_type)
+ module.exit_json(changed=True, key=key_data, msg="key pair created")
+
+
+def _create_key_pair(module, ec2_client, name, tag_spec, key_type):
+ params = dict(KeyName=name)
+ if tag_spec:
+ params['TagSpecifications'] = tag_spec
+ if key_type:
+ params['KeyType'] = key_type
+ try:
+ key = ec2_client.create_key_pair(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error creating key")
+ return key
+
+
+def _import_key_pair(module, ec2_client, name, key_material, tag_spec=None):
+ params = dict(KeyName=name, PublicKeyMaterial=to_bytes(key_material))
+ if tag_spec:
+ params['TagSpecifications'] = tag_spec
+ try:
+ key = ec2_client.import_key_pair(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error importing key")
+ return key
+
+
+def delete_key_pair(module, ec2_client, name, finish_task=True):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if not module.check_mode:
+ try:
+ ec2_client.delete_key_pair(aws_retry=True, KeyName=name)
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error deleting key")
+ if not finish_task:
+ return
+ module.exit_json(changed=True, key=None, msg="key deleted")
+ module.exit_json(key=None, msg="key did not exist")
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ key_material=dict(no_log=False),
+ force=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ key_type=dict(type='str', choices=['rsa', 'ed25519']),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['key_material', 'key_type']
+ ],
+ supports_check_mode=True
+ )
+
+ ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ name = module.params['name']
+ state = module.params.get('state')
+ key_material = module.params.get('key_material')
+ force = module.params.get('force')
+ key_type = module.params.get('key_type')
+
+ if key_type:
+ module.require_botocore_at_least('1.21.23', reason='to set the key_type for a keypair')
+
+ if state == 'absent':
+ delete_key_pair(module, ec2_client, name)
+ elif state == 'present':
+ create_key_pair(module, ec2_client, name, key_material, force, key_type)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
new file mode 100644
index 00000000..754fc34a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
@@ -0,0 +1,598 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_metadata_facts
+version_added: 1.0.0
+short_description: Gathers facts (instance metadata) about remote hosts within EC2
+author:
+ - Silviu Dicu (@silviud)
+ - Vinay Dandekar (@roadmapper)
+description:
+ - This module fetches data from the instance metadata endpoint in EC2 as per
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
+ - The module must be called from within the EC2 instance itself.
+ - The module is configured to utilize the session oriented Instance Metadata Service v2 (IMDSv2)
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
+ - If the HttpEndpoint parameter
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceMetadataOptions.html#API_ModifyInstanceMetadataOptions_RequestParameters)
+ is set to disabled for the EC2 instance, the module will return an error while retrieving a session token.
+notes:
+ - Parameters to filter on ec2_metadata_facts may be added later.
+'''
+
+EXAMPLES = '''
+# Gather EC2 metadata facts
+- amazon.aws.ec2_metadata_facts:
+
+- debug:
+ msg: "This instance is a t1.micro"
+ when: ansible_ec2_instance_type == "t1.micro"
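+
+# A small follow-on sketch using facts documented under RETURN; the values
+# are whatever the metadata service reports for this host.
+- name: report identifying metadata for this instance
+  ansible.builtin.debug:
+    msg: "Instance {{ ansible_ec2_instance_id }} was launched from {{ ansible_ec2_ami_id }}"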
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary of new facts representing discovered properties of the EC2 instance.
+ returned: changed
+ type: complex
+ contains:
+ ansible_ec2_ami_id:
+ description: The AMI ID used to launch the instance.
+ type: str
+ sample: "ami-XXXXXXXX"
+ ansible_ec2_ami_launch_index:
+ description:
+ - If you started more than one instance at the same time, this value indicates the order in which the instance was launched.
+ - The value of the first instance launched is 0.
+ type: str
+ sample: "0"
+ ansible_ec2_ami_manifest_path:
+ description:
+ - The path to the AMI manifest file in Amazon S3.
+ - If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown.
+ type: str
+ sample: "(unknown)"
+ ansible_ec2_ancestor_ami_ids:
+ description:
+ - The AMI IDs of any instances that were rebundled to create this AMI.
+ - This value will only exist if the AMI manifest file contained an ancestor-amis key.
+ type: str
+ sample: "(unknown)"
+ ansible_ec2_block_device_mapping_ami:
+ description: The virtual device that contains the root/boot file system.
+ type: str
+ sample: "/dev/sda1"
+ ansible_ec2_block_device_mapping_ebsN:
+ description:
+ - The virtual devices associated with Amazon EBS volumes, if any are present.
+ - Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started.
+ - The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2).
+ type: str
+ sample: "/dev/xvdb"
+ ansible_ec2_block_device_mapping_ephemeralN:
+ description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume.
+ type: str
+ sample: "/dev/xvdc"
+ ansible_ec2_block_device_mapping_root:
+ description:
+        - The virtual devices or partitions associated with the root device,
+          where the root (/ or C:) file system for the given instance is located.
+ type: str
+ sample: "/dev/sda1"
+ ansible_ec2_block_device_mapping_swap:
+ description: The virtual devices associated with swap. Not always present.
+ type: str
+ sample: "/dev/sda2"
+ ansible_ec2_fws_instance_monitoring:
+ description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch."
+ type: str
+ sample: "enabled"
+ ansible_ec2_hostname:
+ description:
+ - The private IPv4 DNS hostname of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "ip-10-0-0-1.ec2.internal"
+ ansible_ec2_iam_info:
+ description:
+ - If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated,
+ including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present.
+ type: complex
+ sample: ""
+ contains:
+ LastUpdated:
+          description: The last time the InstanceProfile associated with the Instance was changed.
+ type: str
+ InstanceProfileArn:
+ description: The ARN of the InstanceProfile associated with the Instance.
+ type: str
+ InstanceProfileId:
+ description: The Id of the InstanceProfile associated with the Instance.
+ type: str
+ ansible_ec2_iam_info_instanceprofilearn:
+ description: The IAM instance profile ARN.
+ type: str
+ sample: "arn:aws:iam::123456789012:instance-profile/role_name"
+ ansible_ec2_iam_info_instanceprofileid:
+ description: IAM instance profile ID.
+ type: str
+ sample: ""
+ ansible_ec2_iam_info_lastupdated:
+ description: IAM info last updated time.
+ type: str
+ sample: "2017-05-12T02:42:27Z"
+ ansible_ec2_iam_instance_profile_role:
+ description: IAM instance role.
+ type: str
+ sample: "role_name"
+ ansible_ec2_iam_security_credentials_role_name:
+ description:
+        - If there is an IAM role associated with the instance, role-name is the name of the role,
+          and the role-name entry contains the temporary security credentials associated with that role. Otherwise, not present.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_role_name_accesskeyid:
+ description: IAM role access key ID.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_role_name_code:
+ description: IAM code.
+ type: str
+ sample: "Success"
+ ansible_ec2_iam_security_credentials_role_name_expiration:
+ description: IAM role credentials expiration time.
+ type: str
+ sample: "2017-05-12T09:11:41Z"
+ ansible_ec2_iam_security_credentials_role_name_lastupdated:
+ description: IAM role last updated time.
+ type: str
+ sample: "2017-05-12T02:40:44Z"
+ ansible_ec2_iam_security_credentials_role_name_secretaccesskey:
+ description: IAM role secret access key.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_role_name_token:
+ description: IAM role token.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_role_name_type:
+ description: IAM role type.
+ type: str
+ sample: "AWS-HMAC"
+ ansible_ec2_instance_action:
+ description: Notifies the instance that it should reboot in preparation for bundling.
+ type: str
+ sample: "none"
+ ansible_ec2_instance_id:
+ description: The ID of this instance.
+ type: str
+ sample: "i-XXXXXXXXXXXXXXXXX"
+ ansible_ec2_instance_identity_document:
+ description: JSON containing instance attributes, such as instance-id, private IP address, etc.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_accountid:
+      description: The ID of the AWS account that owns the instance.
+ type: str
+ sample: "123456789012"
+ ansible_ec2_instance_identity_document_architecture:
+ description: Instance system architecture.
+ type: str
+ sample: "x86_64"
+ ansible_ec2_instance_identity_document_availabilityzone:
+ description: The Availability Zone in which the instance launched.
+ type: str
+ sample: "us-east-1a"
+ ansible_ec2_instance_identity_document_billingproducts:
+ description: Billing products for this instance.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_devpayproductcodes:
+ description: Product codes for the launched AMI.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_imageid:
+ description: The AMI ID used to launch the instance.
+ type: str
+ sample: "ami-01234567"
+ ansible_ec2_instance_identity_document_instanceid:
+ description: The ID of this instance.
+ type: str
+ sample: "i-0123456789abcdef0"
+ ansible_ec2_instance_identity_document_instancetype:
+ description: The type of instance.
+ type: str
+ sample: "m4.large"
+ ansible_ec2_instance_identity_document_kernelid:
+ description: The ID of the kernel launched with this instance, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_pendingtime:
+ description: The instance pending time.
+ type: str
+ sample: "2017-05-11T20:51:20Z"
+ ansible_ec2_instance_identity_document_privateip:
+ description:
+ - The private IPv4 address of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "10.0.0.1"
+ ansible_ec2_instance_identity_document_ramdiskid:
+ description: The ID of the RAM disk specified at launch time, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_region:
+ description: The Region in which the instance launched.
+ type: str
+ sample: "us-east-1"
+ ansible_ec2_instance_identity_document_version:
+ description: Identity document version.
+ type: str
+ sample: "2010-08-31"
+ ansible_ec2_instance_identity_pkcs7:
+ description: Used to verify the document's authenticity and content against the signature.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_rsa2048:
+ description: Used to verify the document's authenticity and content against the signature.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_signature:
+ description: Data that can be used by other parties to verify its origin and authenticity.
+ type: str
+ sample: ""
+ ansible_ec2_instance_life_cycle:
+ description: The purchasing option of the instance.
+ type: str
+ sample: "on-demand"
+ ansible_ec2_instance_type:
+ description: The type of the instance.
+ type: str
+ sample: "m4.large"
+ ansible_ec2_local_hostname:
+ description:
+ - The private IPv4 DNS hostname of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "ip-10-0-0-1.ec2.internal"
+ ansible_ec2_local_ipv4:
+ description:
+ - The private IPv4 address of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "10.0.0.1"
+ ansible_ec2_mac:
+ description:
+ - The instance's media access control (MAC) address.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "00:11:22:33:44:55"
+ ansible_ec2_metrics_vhostmd:
+ description: Metrics; no longer available.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_device_number:
+ description:
+ - The unique device number associated with that interface. The device number corresponds to the device name;
+ for example, a device-number of 2 is for the eth2 device.
+ - This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI.
+ type: str
+ sample: "0"
+ ansible_ec2_network_interfaces_macs_mac_address_interface_id:
+ description: The elastic network interface ID.
+ type: str
+ sample: "eni-12345678"
+ ansible_ec2_network_interfaces_macs_mac_address_ipv4_associations_ip_address:
+ description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_ipv6s:
+ description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_local_hostname:
+ description: The interface's local hostname.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_local_ipv4s:
+ description: The private IPv4 addresses associated with the interface.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_mac:
+ description: The instance's MAC address.
+ type: str
+ sample: "00:11:22:33:44:55"
+ ansible_ec2_network_interfaces_macs_mac_address_owner_id:
+ description:
+ - The ID of the owner of the network interface.
+ - In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing.
+ - Traffic on an interface is always billed to the interface owner.
+ type: str
+ sample: "123456789012"
+ ansible_ec2_network_interfaces_macs_mac_address_public_hostname:
+ description:
+ - The interface's public DNS (IPv4). If the instance is in a VPC,
+ this category is only returned if the enableDnsHostnames attribute is set to true.
+ type: str
+ sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
+ ansible_ec2_network_interfaces_macs_mac_address_public_ipv4s:
+ description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance.
+ type: str
+ sample: "1.2.3.4"
+ ansible_ec2_network_interfaces_macs_mac_address_security_group_ids:
+ description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC.
+ type: str
+ sample: "sg-01234567,sg-01234568"
+ ansible_ec2_network_interfaces_macs_mac_address_security_groups:
+ description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC.
+ type: str
+ sample: "secgroup1,secgroup2"
+ ansible_ec2_network_interfaces_macs_mac_address_subnet_id:
+ description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "subnet-01234567"
+ ansible_ec2_network_interfaces_macs_mac_address_subnet_ipv4_cidr_block:
+ description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.1.0/24"
+ ansible_ec2_network_interfaces_macs_mac_address_subnet_ipv6_cidr_blocks:
+ description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_mac_address_vpc_id:
+ description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "vpc-0123456"
+ ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv4_cidr_block:
+ description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.0.0/16"
+ ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv4_cidr_blocks:
+ description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.0.0/16"
+ ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv6_cidr_blocks:
+ description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_placement_availability_zone:
+ description: The Availability Zone in which the instance launched.
+ type: str
+ sample: "us-east-1a"
+ ansible_ec2_placement_region:
+ description: The Region in which the instance launched.
+ type: str
+ sample: "us-east-1"
+ ansible_ec2_product_codes:
+ description: Product codes associated with the instance, if any.
+ type: str
+ sample: "aw0evgkw8e5c1q413zgy5pjce"
+ ansible_ec2_profile:
+ description: EC2 instance hardware profile.
+ type: str
+ sample: "default-hvm"
+ ansible_ec2_public_hostname:
+ description:
+ - The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true.
+ type: str
+ sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
+ ansible_ec2_public_ipv4:
+ description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address.
+ type: str
+ sample: "1.2.3.4"
+ ansible_ec2_public_key:
+ description: Public key. Only available if supplied at instance launch time.
+ type: str
+ sample: ""
+ ansible_ec2_ramdisk_id:
+ description: The ID of the RAM disk specified at launch time, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_reservation_id:
+ description: The ID of the reservation.
+ type: str
+ sample: "r-0123456789abcdef0"
+ ansible_ec2_security_groups:
+ description:
+ - The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC.
+ - Such changes are reflected here and in network/interfaces/macs/mac/security-groups.
+ type: str
+ sample: "securitygroup1,securitygroup2"
+ ansible_ec2_services_domain:
+ description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1.
+ type: str
+ sample: "amazonaws.com"
+ ansible_ec2_services_partition:
+ description:
+ - The partition that the resource is in. For standard AWS regions, the partition is aws.
+ - If you have resources in other partitions, the partition is aws-partitionname.
+ - For example, the partition for resources in the China (Beijing) region is aws-cn.
+ type: str
+ sample: "aws"
+ ansible_ec2_spot_termination_time:
+ description:
+ - The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal.
+ - This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2.
+ - The termination-time item is not set to a time if you terminated the Spot instance yourself.
+ type: str
+ sample: "2015-01-05T18:02:00Z"
+ ansible_ec2_user_data:
+ description: The instance user data.
+ type: str
+ sample: "#!/bin/bash"
+'''
+
+import json
+import re
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+socket.setdefaulttimeout(5)
+
+
+class Ec2Metadata(object):
+ ec2_metadata_token_uri = 'http://169.254.169.254/latest/api/token'
+ ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
+ ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
+ ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
+ ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/'
+
+ def __init__(self, module, ec2_metadata_token_uri=None, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None):
+ self.module = module
+ self.uri_token = ec2_metadata_token_uri or self.ec2_metadata_token_uri
+ self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
+ self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
+ self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
+ self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri
+ self._data = {}
+ self._token = None
+ self._prefix = 'ansible_ec2_%s'
+
+ def _fetch(self, url):
+ encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]')
+ headers = {}
+ if self._token:
+ headers = {'X-aws-ec2-metadata-token': self._token}
+ response, info = fetch_url(self.module, encoded_url, headers=headers, force=True)
+
+ if info.get('status') in (401, 403):
+ self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info)
+ elif info.get('status') not in (200, 404):
+ time.sleep(3)
+ # request went bad, retry once then raise
+ self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg']))
+ response, info = fetch_url(self.module, encoded_url, headers=headers, force=True)
+ if info.get('status') not in (200, 404):
+ # fail out now
+ self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info)
+ if response and info['status'] < 400:
+ data = response.read()
+ else:
+ data = None
+ return to_text(data)
+
+ def _mangle_fields(self, fields, uri, filter_patterns=None):
+ filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns
+
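+ # Illustrative sketch (hypothetical key): with
+ # uri = 'http://169.254.169.254/latest/meta-data/', a fetched key such as
+ # uri + 'placement/availability-zone' becomes
+ # 'ansible_ec2_placement-availability-zone'; the remaining dashes are only
+ # rewritten to underscores later, by fix_invalid_varnames().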
+ new_fields = {}
+ for key, value in fields.items():
+ split_fields = key[len(uri):].split('/')
+ # Parse out the IAM role name (which is _not_ the same as the instance profile name)
+ if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]:
+ new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2]
+ if len(split_fields) > 1 and split_fields[1]:
+ new_key = "-".join(split_fields)
+ new_fields[self._prefix % new_key] = value
+ else:
+ new_key = "".join(split_fields)
+ new_fields[self._prefix % new_key] = value
+ for pattern in filter_patterns:
+ for key in dict(new_fields):
+ match = re.search(pattern, key)
+ if match:
+ new_fields.pop(key)
+ return new_fields
+
+ def fetch(self, uri, recurse=True):
+ raw_subfields = self._fetch(uri)
+ if not raw_subfields:
+ return
+ subfields = raw_subfields.split('\n')
+ for field in subfields:
+ if field.endswith('/') and recurse:
+ self.fetch(uri + field)
+ if uri.endswith('/'):
+ new_uri = uri + field
+ else:
+ new_uri = uri + '/' + field
+ if new_uri not in self._data and not new_uri.endswith('/'):
+ content = self._fetch(new_uri)
+ if field == 'security-groups' or field == 'security-group-ids':
+ sg_fields = ",".join(content.split('\n'))
+ self._data['%s' % (new_uri)] = sg_fields
+ else:
+ try:
+ json_dict = json.loads(content)
+ self._data['%s' % (new_uri)] = content
+ for (key, value) in json_dict.items():
+ self._data['%s:%s' % (new_uri, key.lower())] = value
+ except (json.JSONDecodeError, AttributeError):
+ self._data['%s' % (new_uri)] = content # not a stringified JSON string
+
+ def fix_invalid_varnames(self, data):
+ """Change ':'' and '-' to '_' to ensure valid template variable names"""
+ new_data = data.copy()
+ for key, value in data.items():
+ if ':' in key or '-' in key:
+ newkey = re.sub(':|-', '_', key)
+ new_data[newkey] = value
+ del new_data[key]
+
+ return new_data
+
+ def fetch_session_token(self, uri_token):
+ """Used to get a session token for IMDSv2"""
+ headers = {'X-aws-ec2-metadata-token-ttl-seconds': '60'}
+ response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True)
+
+ if info.get('status') == 403:
+ self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info)
+ elif info.get('status') not in (200, 404):
+ time.sleep(3)
+ # request went bad, retry once then raise
+ self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg']))
+ response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True)
+ if info.get('status') not in (200, 404):
+ # fail out now
+ self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info)
+ if response:
+ token_data = response.read()
+ else:
+ token_data = None
+ return to_text(token_data)
+
+ def run(self):
+ self._token = self.fetch_session_token(self.uri_token) # create session token for IMDS
+ self.fetch(self.uri_meta) # populate _data with metadata
+ data = self._mangle_fields(self._data, self.uri_meta)
+ data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
+ data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
+
+ self._data = {} # clear out metadata in _data
+ self.fetch(self.uri_dynamic) # populate _data with dynamic data
+ dyndata = self._mangle_fields(self._data, self.uri_dynamic)
+ data.update(dyndata)
+ data = self.fix_invalid_varnames(data)
+
+ # Maintain old key for backwards compatibility
+ if 'ansible_ec2_instance_identity_document_region' in data:
+ data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region']
+ return data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ ec2_metadata_facts = Ec2Metadata(module).run()
+ ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts)
+
+ module.exit_json(**ec2_metadata_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
new file mode 100644
index 00000000..d4fa9b56
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
@@ -0,0 +1,1483 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_security_group
+version_added: 1.0.0
+author:
+ - "Andrew de Quincey (@adq)"
+ - "Razique Mahroua (@Razique)"
+short_description: Maintain an EC2 security group
+description:
+ - Maintains EC2 security groups.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - Exactly one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ type: str
+ group_id:
+ description:
+ - ID of the security group to delete (used only when I(state=absent)).
+ - Exactly one of I(name) or I(group_id) is required.
+ required: false
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. A rule may reference the group's own name in I(group_name),
+ which allows idempotent loopback rules (for example, allowing the group to access itself).
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will
+ be removed in a release after 2024-12-01.
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will
+ be removed in a release after 2024-12-01.
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: list
+ elements: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - I(group_name) can accept values of type str and list.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or
+ number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)).
+ - When using C(icmp) or C(icmpv6) as the protocol, you can pass the
+ C(icmp_type) and C(icmp_code) parameters instead of
+ C(from_port) and C(to_port).
+ from_port:
+ type: int
+ description:
+ - The start of the range of ports that traffic is coming from.
+ - A value can be between C(0) and C(65535).
+ - A value of C(-1) indicates all ports (only supported when I(proto=icmp)).
+ to_port:
+ type: int
+ description:
+ - The end of the range of ports that traffic is coming from.
+ - A value can be between C(0) and C(65535).
+ - A value of C(-1) indicates all ports (only supported when I(proto=icmp)).
+ icmp_type:
+ version_added: 3.3.0
+ type: int
+ description:
+ - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify
+ the ICMP type to use. The option is mutually exclusive with C(from_port).
+ - A value of C(-1) indicates all ICMP types.
+ icmp_code:
+ version_added: 3.3.0
+ type: int
+ description:
+ - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify
+ the ICMP code to use. The option is mutually exclusive with C(to_port).
+ - A value of C(-1) indicates all ICMP codes.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ required: false
+ type: list
+ elements: dict
+ aliases: ['egress_rules']
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will
+ be removed in a release after 2024-12-01.
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will
+ be removed in a release after 2024-12-01.
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is going to.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or
+ number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)).
+ - When using C(icmp) or C(icmpv6) as the protocol, you can pass the
+ C(icmp_type) and C(icmp_code) parameters instead of C(from_port) and C(to_port).
+ from_port:
+ type: int
+ description:
+ - The start of the range of ports that traffic is going to.
+ - A value can be between C(0) and C(65535).
+ - A value of C(-1) indicates all ports (only supported when I(proto=icmp)).
+ to_port:
+ type: int
+ description:
+ - The end of the range of ports that traffic is going to.
+ - A value can be between C(0) and C(65535).
+ - A value of C(-1) indicates all ports (only supported when I(proto=icmp)).
+ icmp_type:
+ version_added: 3.3.0
+ type: int
+ description:
+ - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify
+ the ICMP type to use. The option is mutually exclusive with C(from_port).
+ - A value of C(-1) indicates all ICMP types.
+ icmp_code:
+ version_added: 3.3.0
+ type: int
+ description:
+ - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify
+ the ICMP code to use. The option is mutually exclusive with C(to_port).
+ - A value of C(-1) indicates all ICMP codes.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ state:
+ description:
+ - Create or delete a security group.
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ type: str
+ purge_rules:
+ description:
+ - Purge existing rules on security group that are not found in rules.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ purge_rules_egress:
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress.
+ required: false
+ default: 'true'
+ aliases: ['purge_egress_rules']
+ type: bool
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+ - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group). The usage did not
+ change.
+'''
+
+EXAMPLES = '''
+- name: example using security group rule descriptions
+ amazon.aws.ec2_security_group:
+ name: "{{ name }}"
+ description: sg with rule descriptions
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ rule_desc: allow all on port 80
+
+- name: example using ICMP types and codes
+ amazon.aws.ec2_security_group:
+ name: "{{ name }}"
+ description: sg for ICMP
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: icmp
+ icmp_type: 3
+ icmp_code: 1
+ cidr_ip: 0.0.0.0/0
+
+- name: example ec2 group
+ amazon.aws.ec2_security_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ # this should only be needed for EC2 Classic security group rules
+ # because in a VPC an ELB will use a user-account security group
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123456789012/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ - proto: all
+ # in the 'proto' attribute, if you specify -1, all, or a protocol number
+ # other than tcp, udp, icmp, or 58 (ICMPv6), traffic on all ports is allowed, regardless of any ports that
+ # you specify.
+ from_port: 10050 # this value is ignored
+ to_port: 10050 # this value is ignored
+ cidr_ip: 10.0.0.0/8
+
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ amazon.aws.ec2_security_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single
+ # port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows defining
+ # multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+ diff: True
+
+- name: "Delete group by its id"
+ amazon.aws.ec2_security_group:
+ region: eu-west-1
+ group_id: sg-33b4ee5b
+ state: absent
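+
+ # A minimal sketch (hypothetical names and IDs): an empty I(rules_egress)
+ # list, combined with the default purge_rules_egress=true, removes the
+ # automatically created allow-all outbound rule.
+ - name: "Example: remove all egress rules from a group"
+ amazon.aws.ec2_security_group:
+ name: example-no-egress
+ description: an example EC2 group with no outbound rules
+ vpc_id: vpc-xxxxxxxx
+ region: eu-west-1
+ rules_egress: []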
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name.
+ sample: My Security Group
+ type: str
+ returned: on create/update
+group_id:
+ description: Security group ID.
+ sample: sg-abcd1234
+ type: str
+ returned: on create/update
+description:
+ description: Description of the security group.
+ sample: My Security Group
+ type: str
+ returned: on create/update
+tags:
+ description: Tags associated with the security group.
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of the VPC to which the security group belongs.
+ sample: vpc-abcd1234
+ type: str
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "198.51.100.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group.
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import itertools
+import json
+import re
+from collections import namedtuple
+from copy import deepcopy
+from ipaddress import IPv6Network
+from ipaddress import ip_network
+from time import sleep
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.network import to_ipv6_subnet
+from ansible.module_utils.common.network import to_subnet
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
+valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
+current_account_id = None
+
+
+def rule_cmp(a, b):
+ """Compare rules without descriptions"""
+ for prop in ['port_range', 'protocol', 'target', 'target_type']:
+ if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
+ # equal protocols can interchange `(-1, -1)` and `(None, None)`
+ if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
+ continue
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ return True
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
+def to_permission(rule):
+ # take a Rule, output the serialized grant
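+ # Illustrative sketch: Rule((80, 80), 'tcp', '203.0.113.0/24', 'ipv4', 'web')
+ # serializes to {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80,
+ # 'IpRanges': [{'CidrIp': '203.0.113.0/24', 'Description': 'web'}]}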
+ perm = {
+ 'IpProtocol': rule.protocol,
+ }
+ perm['FromPort'], perm['ToPort'] = rule.port_range
+ if rule.target_type == 'ipv4':
+ perm['IpRanges'] = [{
+ 'CidrIp': rule.target,
+ }]
+ if rule.description:
+ perm['IpRanges'][0]['Description'] = rule.description
+ elif rule.target_type == 'ipv6':
+ perm['Ipv6Ranges'] = [{
+ 'CidrIpv6': rule.target,
+ }]
+ if rule.description:
+ perm['Ipv6Ranges'][0]['Description'] = rule.description
+ elif rule.target_type == 'group':
+ if isinstance(rule.target, tuple):
+ pair = {}
+ if rule.target[0]:
+ pair['UserId'] = rule.target[0]
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if rule.target[1]:
+ pair['GroupId'] = rule.target[1]
+ elif rule.target[2]:
+ pair['GroupName'] = rule.target[2]
+ perm['UserIdGroupPairs'] = [pair]
+ else:
+ perm['UserIdGroupPairs'] = [{
+ 'GroupId': rule.target
+ }]
+ if rule.description:
+ perm['UserIdGroupPairs'][0]['Description'] = rule.description
+ elif rule.target_type == 'ip_prefix':
+ perm['PrefixListIds'] = [{
+ 'PrefixListId': rule.target,
+ }]
+ if rule.description:
+ perm['PrefixListIds'][0]['Description'] = rule.description
+ elif rule.target_type not in valid_targets:
+ raise ValueError('Invalid target type for rule {0}'.format(rule))
+ return fix_port_and_protocol(perm)
+
+
+def rule_from_group_permission(perm):
+ """
+ Returns a rule dict from an existing security group.
+
+ When using a security group as a target all 3 fields (OwnerId, GroupId, and
+ GroupName) need to exist in the target. This ensures consistency of the
+ values that will be compared to desired_ingress or desired_egress
+ in wait_for_rule_propagation().
+ GroupId is preferred as it is more specific except when targeting 'amazon-'
+ prefixed security groups (such as EC2 Classic ELBs).
+ """
+ def ports_from_permission(p):
+ if 'FromPort' not in p and 'ToPort' not in p:
+ return (None, None)
+ return (int(p['FromPort']), int(p['ToPort']))
+
+ # outputs a rule tuple
+ for target_key, target_subkey, target_type in [
+ ('IpRanges', 'CidrIp', 'ipv4'),
+ ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
+ ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
+ ]:
+ if target_key not in perm:
+ continue
+ for r in perm[target_key]:
+ # there may be several IP ranges here, which is ok
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ r[target_subkey],
+ target_type,
+ r.get('Description')
+ )
+ if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
+ for pair in perm['UserIdGroupPairs']:
+ target = (
+ pair.get('UserId', current_account_id),
+ pair.get('GroupId', None),
+ None,
+ )
+ if pair.get('UserId', '').startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need
+ # group-id specified, so remove it when querying
+ # from permission
+ target = (
+ pair.get('UserId', None),
+ None,
+ pair.get('GroupName', None),
+ )
+ elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id:
+ # EC2-Classic cross-account
+ pass
+ elif 'VpcPeeringConnectionId' in pair:
+ # EC2-VPC cross-account VPC peering
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ None,
+ )
+
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ target,
+ 'group',
+ pair.get('Description')
+ )
+
+
+# Wrap just this method so we can retry on missing groups
+@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound'])
+def get_security_groups_with_backoff(client, **kwargs):
+ return client.describe_security_groups(**kwargs)
+
+
+def sg_exists_with_backoff(client, **kwargs):
+ try:
+ return client.describe_security_groups(aws_retry=True, **kwargs)
+ except is_boto3_error_code('InvalidGroup.NotFound'):
+ return {'SecurityGroups': []}
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = (
+ 'cidr_ip',
+ 'cidr_ipv6',
+ 'ip_prefix',
+ 'group_id',
+ 'group_name',
+ 'group_desc',
+ 'proto',
+ 'from_port',
+ 'to_port',
+ 'icmp_type',
+ 'icmp_code',
+ 'icmp_keys',
+ 'rule_desc',
+ )
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+ elif ('icmp_type' in rule or 'icmp_code' in rule) and 'ports' in rule:
+ module.fail_json(msg='Specify icmp_code/icmp_type OR ports, not both')
+ elif ('from_port' in rule or 'to_port' in rule) and ('icmp_type' in rule or 'icmp_code' in rule) and 'icmp_keys' not in rule:
+ module.fail_json(msg='Specify from_port/to_port OR icmp_type/icmp_code, not both')
+ elif ('icmp_type' in rule or 'icmp_code' in rule) and ('icmp' not in rule['proto']):
+ module.fail_json(msg='Specify proto: icmp or icmpv6 when using icmp_type/icmp_code')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an IP range or a security group as the target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None IP range.
+
+ When using a security group as a target all 3 fields (OwnerId, GroupId, and
+ GroupName) need to exist in the target. This ensures consistency of the
+ values that will be compared to current_rules (from current_ingress and
+ current_egress) in wait_for_rule_propagation().
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
+ owner_id = current_account_id
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg,
+ # and peer-VPC groups like 0987654321/sg-1234567890/example
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ if group_id and group_name:
+ if group_name.startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need group_id specified,
+ group_id = None
+ else:
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', (owner_id, rule['group_id'], None), False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ # retry describing the group once
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
+ module.fail_json(msg="group %s will be automatically created by rule %s but "
+ "no description was provided" % (group_name, rule))
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ elif not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ auto_group = client.create_security_group(aws_retry=True, **params)
+ get_waiter(
+ client, 'security_group_exists',
+ ).wait(
+ GroupIds=[auto_group['GroupId']],
+ )
+ except is_boto3_error_code('InvalidGroup.Duplicate'):
+ # The group exists, but didn't show up in any of our describe-security-groups calls
+ # Try searching on a filter for the name, and allow a retry window for AWS to update
+ # the model on their end.
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except IndexError:
+ module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ except ClientError as e:
+ module.fail_json_aws(
+ e,
+ msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ if auto_group is not None:
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ return 'group', (owner_id, group_id, None), target_group_created
+ elif 'cidr_ip' in rule:
+ return 'ipv4', validate_ip(module, rule['cidr_ip']), False
+ elif 'cidr_ipv6' in rule:
+ return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
+ elif 'ip_prefix' in rule:
+ return 'ip_prefix', rule['ip_prefix'], False
+
+ module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
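+ # e.g. (illustrative) ports_expand([80, '8080-8099']) -> [(80, 80), (8080, 8099)]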
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, string_types):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((int(port.strip()),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ # uses icmp_code and icmp_type instead of from_ports and to_ports when
+ # available.
+ if 'ports' not in rule:
+ non_icmp_params = any([
+ rule.get('icmp_type', None) is None, rule.get('icmp_code', None) is None])
+ conflict = not non_icmp_params and any([
+ rule.get('from_port', None), rule.get('to_port', None)])
+
+ if non_icmp_params:
+ if isinstance(rule.get('from_port'), string_types):
+ rule['from_port'] = int(rule.get('from_port'))
+ if isinstance(rule.get('to_port'), string_types):
+ rule['to_port'] = int(rule.get('to_port'))
+ else:
+ rule['from_port'] = int(rule.get('icmp_type')) if isinstance(rule.get('icmp_type'), string_types) else rule.get('icmp_type')
+ rule['to_port'] = int(rule.get('icmp_code')) if isinstance(rule.get('icmp_code'), string_types) else rule.get('icmp_code')
+ # Used temporarily to track the fact that icmp keys were converted
+ # to from_port/to_port
+ if not conflict:
+ rule['icmp_keys'] = True
+
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
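+ # Illustrative sketch: a rule with cidr_ip: ['10.0.0.0/8', '172.16.0.0/12']
+ # expands into two otherwise-identical rules, each with a single cidr_ip.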
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
+def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+ # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+ isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
+ ip = cidr_ip
+ except ValueError:
+ # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
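+ # e.g. (illustrative) validate_ip(module, '10.0.0.5/24') warns that host bits
+ # are set and returns the masked network '10.0.0.0/24' (via to_subnet).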
+
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed |= True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed |= True
+ return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(aws_retry=True, **params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group for getting an updated object
+ # amazon sometimes takes a couple seconds to update the security group so wait till it exists
+ while True:
+ sleep(3)
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+ return group
+ return None
+
+
+def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+ group_id = group['GroupId']
+ tries = 6
+
+ def await_rules(group, desired_rules, purge, rule_key):
+ for _i in range(tries):
+ current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+ if purge and len(current_rules ^ set(desired_rules)) == 0:
+ return group
+ elif purge:
+ conflicts = current_rules ^ set(desired_rules)
+ # For cases where set comparison is equivalent, but invalid port/proto exist
+ for a, b in itertools.combinations(conflicts, 2):
+ if rule_cmp(a, b):
+ conflicts.discard(a)
+ conflicts.discard(b)
+ if not len(conflicts):
+ return group
+ elif current_rules.issuperset(desired_rules) and not purge:
+ return group
+ sleep(10)
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+ return group
+
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ if 'VpcId' in group and module.params.get('rules_egress') is not None:
+ group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+ return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+ params = {'Filters': []}
+ if group_id:
+ params['GroupIds'] = [group_id]
+ if name:
+ # Add name to filters rather than params['GroupNames']
+ # because params['GroupNames'] only checks the default vpc if no vpc is provided
+ params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+ if vpc_id:
+ params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ # Don't filter by description to maintain backwards compatibility
+
+ try:
+ security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+ all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+ if security_groups:
+ groups = dict((group['GroupId'], group) for group in all_groups)
+ groups.update(dict((group['GroupName'], group) for group in all_groups))
+ if vpc_id:
+ vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+ groups.update(vpc_wins)
+ # maintain backwards compatibility by using the last matching group
+ return security_groups[-1], groups
+ return None, {}
+
+
+def get_diff_final_resource(client, module, security_group):
+ def get_account_id(security_group, module):
+ try:
+ owner_id = security_group.get('owner_id', current_account_id)
+ except (BotoCoreError, ClientError) as e:
+ owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
+ return owner_id
+
+ def get_final_tags(security_group_tags, specified_tags, purge_tags):
+ if specified_tags is None:
+ return security_group_tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
+ end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
+ end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
+ end_result_tags.update(tags_need_modify)
+ return end_result_tags
+
+ def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
+ if specified_rules is None:
+ return security_group_rules
+ if purge_rules:
+ final_rules = []
+ else:
+ final_rules = list(security_group_rules)
+ specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
+ for rule in specified_rules:
+ format_rule = {
+ 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
+ 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
+ }
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ format_rule['ip_protocol'] = '-1'
+ format_rule.pop('from_port')
+ format_rule.pop('to_port')
+ elif rule.get('ports'):
+ if isinstance(rule['ports'], (string_types, int)):
+ rule['ports'] = [rule['ports']]
+ for port in rule.get('ports'):
+ if isinstance(port, string_types) and '-' in port:
+ format_rule['from_port'], format_rule['to_port'] = port.split('-')
+ else:
+ format_rule['from_port'] = format_rule['to_port'] = port
+ elif rule.get('from_port') or rule.get('to_port'):
+ format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
+ format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
+ for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
+ if rule.get(source_type):
+ rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
+ if rule.get('rule_desc'):
+ format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
+ else:
+ if not isinstance(rule[source_type], list):
+ rule[source_type] = [rule[source_type]]
+ format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
+ if rule.get('group_id') or rule.get('group_name'):
+ rule_sg = group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0]
+ if rule_sg is None:
+ # --diff during --check
+ format_rule['user_id_group_pairs'] = [{
+ 'group_id': rule.get('group_id'),
+ 'group_name': rule.get('group_name'),
+ 'peering_status': None,
+ 'user_id': get_account_id(security_group, module),
+ 'vpc_id': module.params['vpc_id'],
+ 'vpc_peering_connection_id': None
+ }]
+ else:
+ rule_sg = camel_dict_to_snake_dict(rule_sg)
+ format_rule['user_id_group_pairs'] = [{
+ 'description': rule_sg.get('description', rule_sg.get('group_desc')),
+ 'group_id': rule_sg.get('group_id', rule.get('group_id')),
+ 'group_name': rule_sg.get('group_name', rule.get('group_name')),
+ 'peering_status': rule_sg.get('peering_status'),
+ 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
+ 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
+ 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
+ }]
+ for k, v in list(format_rule['user_id_group_pairs'][0].items()):
+ if v is None:
+ format_rule['user_id_group_pairs'][0].pop(k)
+ final_rules.append(format_rule)
+ # Order final rules consistently
+ final_rules.sort(key=get_ip_permissions_sort_key)
+ return final_rules
+
+ security_group_ingress = security_group.get('ip_permissions', [])
+ specified_ingress = module.params['rules']
+ purge_ingress = module.params['purge_rules']
+ security_group_egress = security_group.get('ip_permissions_egress', [])
+ specified_egress = module.params['rules_egress']
+ purge_egress = module.params['purge_rules_egress']
+ return {
+ 'description': module.params['description'],
+ 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
+ 'group_name': security_group.get('group_name', module.params['name']),
+ 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
+ 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
+ 'owner_id': get_account_id(security_group, module),
+ 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
+ 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
+
+
+def flatten_nested_targets(module, rules):
+ def _flatten(targets):
+ for target in targets:
+ if isinstance(target, list):
+ module.deprecate('Support for nested lists in cidr_ip and cidr_ipv6 has been '
+ 'deprecated. The flatten filter can be used instead.',
+ date='2024-12-01', collection_name='amazon.aws')
+ for t in _flatten(target):
+ yield t
+ elif isinstance(target, string_types):
+ yield target
+
+ if rules is not None:
+ for rule in rules:
+ target_list_type = None
+ if isinstance(rule.get('cidr_ip'), list):
+ target_list_type = 'cidr_ip'
+ elif isinstance(rule.get('cidr_ipv6'), list):
+ target_list_type = 'cidr_ipv6'
+ if target_list_type is not None:
+ rule[target_list_type] = list(_flatten(rule[target_list_type]))
+ return rules
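+# For example (a sketch of the deprecated behaviour): a rule such as
+#   {'proto': 'tcp', 'cidr_ip': ['10.0.0.0/8', ['172.16.0.0/12']]}
+# is flattened to
+#   {'proto': 'tcp', 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']}
+# after the deprecation warning is emitted for the nested list.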
+
+
+def get_rule_sort_key(dicts):
+ if dicts.get('cidr_ip'):
+ return dicts.get('cidr_ip')
+ elif dicts.get('cidr_ipv6'):
+ return dicts.get('cidr_ipv6')
+ elif dicts.get('prefix_list_id'):
+ return dicts.get('prefix_list_id')
+ elif dicts.get('group_id'):
+ return dicts.get('group_id')
+ return None
+
+
+def get_ip_permissions_sort_key(rule):
+ if rule.get('ip_ranges'):
+ rule.get('ip_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ip_ranges')[0]['cidr_ip']
+ elif rule.get('ipv6_ranges'):
+ rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ipv6_ranges')[0]['cidr_ipv6']
+ elif rule.get('prefix_list_ids'):
+ rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
+ return rule.get('prefix_list_ids')[0]['prefix_list_id']
+ elif rule.get('user_id_group_pairs'):
+ rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
+ return rule.get('user_id_group_pairs')[0].get('group_id', '')
+ return None
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list', elements='dict'),
+ rules_egress=dict(type='list', elements='dict', aliases=['egress_rules']),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool', aliases=['purge_egress_rules']),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
+ rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ client = module.client('ec2', AWSRetry.jittered_backoff())
+
+ group, groups = group_exists(client, module, vpc_id, group_id, name)
+ group_created_new = not bool(group)
+
+ global current_account_id
+ current_account_id = get_aws_account_id(module)
+
+ before = {}
+ after = {}
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ try:
+ if not module.check_mode:
+ client.delete_security_group(aws_retry=True, GroupId=group['GroupId'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, _rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ rule.pop('icmp_type', None)
+ rule.pop('icmp_code', None)
+ rule.pop('icmp_keys', None)
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
+ new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+ else:
+ current_egress.append(rule)
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any(rule_cmp(p, b) for b in named_tuple_ingress_list):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+ if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any(rule_cmp(p, b) for b in named_tuple_egress_list):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+ # named_tuple_ingress_list and named_tuple_egress_list get updated by
+ # method update_rule_descriptions, deep copy these two lists to new
+ # variables for the record of the 'desired' ingress and egress sg permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress,
+ named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+
+ new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+ # A new group with no rules provided has already been awaited during creation;
+ # we only wait for AWS to add the default egress rule.
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+ # keep pulling until current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
new file mode 100644
index 00000000..3440f90e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_security_group_info
+version_added: 1.0.0
+short_description: Gather information about EC2 security groups in AWS
+description:
+ - Gather information about EC2 security groups in AWS.
+author:
+- Henrique Rodrigues (@Sodki)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores (_)
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all security groups in a region. To limit results use the
+ appropriate filters.
+ - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group_info). The usage did not
+ change.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_security_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_security_group_info:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-name: example-1
+
+# Gather information about a security group by id
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group_id: sg-12345678
+ vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-name:
+ - example-1
+ - example-2
+ - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around 'tag:Name' are important because of the colon in the key
+- amazon.aws.ec2_security_group_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+ description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+ type: list
+ returned: always
+ elements: dict
+ contains:
+ description:
+ description: The description of the security group.
+ returned: always
+ type: str
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ ip_permissions:
+ description: The inbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+ ip_permissions_egress:
+ description: The outbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+ owner_id:
+ description: The AWS account ID of the owner of the security group.
+ returned: always
+ type: str
+ tags:
+ description: The tags associated with the security group.
+ returned: always
+ type: dict
+ vpc_id:
+ description: The ID of the VPC for the security group.
+ returned: always
+ type: str
+ sample: [
+ {
+ "description": "created by rds_instance integration tests",
+ "group_id": "sg-036496a610b79da88",
+ "group_name": "ansible-test-89355088-unknown5c5f67f3ad09-sg-1",
+ "ip_permissions": [],
+ "ip_permissions_egress": [
+ {
+ "ip_protocol": "-1",
+ "ip_ranges": [
+ {
+ "cidr_ip": "0.0.0.0/0"
+ }
+ ],
+ "ipv6_ranges": [],
+ "prefix_list_ids": [],
+ "user_id_group_pairs": []
+ }
+ ],
+ "owner_id": "123456789012",
+ "tags": {},
+ "vpc_id": "vpc-0bc3bb03f97405435"
+ }
+ ]
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2', AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
+ filters = module.params.get("filters")
+ sanitized_filters = dict()
+
+ for key in filters:
+ if key.startswith("tag:"):
+ sanitized_filters[key] = filters[key]
+ else:
+ sanitized_filters[key.replace("_", "-")] = filters[key]
+
+ try:
+ security_groups = connection.describe_security_groups(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe security groups')
+
+ snaked_security_groups = []
+ for security_group in security_groups['SecurityGroups']:
+ # Modify boto3 tags list to be ansible friendly dict
+ # but don't camel case tags
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
+ snaked_security_groups.append(security_group)
+
+ module.exit_json(security_groups=snaked_security_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
new file mode 100644
index 00000000..5a74ca4b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot
+version_added: 1.0.0
+short_description: Creates a snapshot from an existing volume
+description:
+ - Creates an EC2 snapshot from an existing EBS volume.
+options:
+ volume_id:
+ description:
+ - Volume from which to take the snapshot.
+ required: false
+ type: str
+ description:
+ description:
+ - Description to be applied to the snapshot.
+ required: false
+ type: str
+ instance_id:
+ description:
+ - Instance that has the required volume to snapshot mounted.
+ required: false
+ type: str
+ device_name:
+ description:
+ - Device name of a mounted volume to be snapshotted.
+ required: false
+ type: str
+ snapshot_tags:
+ description:
+ - A dictionary of tags to add to the snapshot.
+ - If the volume has a C(Name) tag this will be automatically added to the
+ snapshot.
+ type: dict
+ required: false
+ wait:
+ description:
+ - Wait for the snapshot to be ready.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ required: false
+ default: 600
+ type: int
+ state:
+ description:
+ - Whether to create or delete a snapshot.
+ required: false
+ default: present
+ choices: ['absent', 'present']
+ type: str
+ snapshot_id:
+ description:
+ - Snapshot id to remove.
+ required: false
+ type: str
+ last_snapshot_min_age:
+ description:
+ - If the volume's most recent snapshot has started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
+ required: false
+ default: 0
+ type: int
+author: "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Simple snapshot of volume using volume_id
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume mounted on device_name attached to instance_id
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume with tagging
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ snapshot_tags:
+ frequency: hourly
+ source: /data
+
+# Remove a snapshot
+- amazon.aws.ec2_snapshot:
+ snapshot_id: snap-abcd1234
+ state: absent
+
+# Create a snapshot only if the most recent one is older than 1 hour
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ last_snapshot_min_age: 60
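+
+# Register the result and reuse the snapshot ID (a sketch; IDs are placeholders)
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ snapshot_tags:
+ frequency: nightly
+ register: snap_result
+
+- amazon.aws.ec2_snapshot:
+ snapshot_id: "{{ snap_result.snapshot_id }}"
+ state: absent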
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'Name': 'instance-name' }"
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+'''
+
+import datetime
+
+try:
+ import botocore
+except ImportError:
+ pass # Taken care of by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
+ """
+ Gets the most recently created snapshot and optionally filters the result
+ if the snapshot is too old
+ :param snapshots: list of snapshots to search
+ :param max_snapshot_age_secs: filter the result if its older than this
+ :param now: simulate time -- used for unit testing
+ :return:
+ """
+ if len(snapshots) == 0:
+ return None
+
+ if not now:
+ now = datetime.datetime.now(datetime.timezone.utc)
+
+ youngest_snapshot = max(snapshots, key=lambda s: s['StartTime'])
+ snapshot_start = youngest_snapshot['StartTime']
+ snapshot_age = now - snapshot_start
+
+ if max_snapshot_age_secs is not None:
+ if snapshot_age.total_seconds() > max_snapshot_age_secs:
+ return None
+
+ return youngest_snapshot
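+# For example (hypothetical timings): given snapshots started 30 and 90 minutes
+# ago and max_snapshot_age_secs=3600, the 30-minute-old snapshot is returned;
+# with max_snapshot_age_secs=600 the result is None, since 1800s > 600s.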
+
+
+def get_volume_by_instance(module, ec2, device_name, instance_id):
+ try:
+ _filter = {
+ 'attachment.instance-id': instance_id,
+ 'attachment.device': device_name
+ }
+ volumes = ec2.describe_volumes(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(_filter)
+ )['Volumes']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe Volume")
+
+ if not volumes:
+ module.fail_json(
+ msg="Could not find volume with name {0} attached to instance {1}".format(
+ device_name, instance_id
+ )
+ )
+
+ volume = volumes[0]
+ return volume
+
+
+def get_volume_by_id(module, ec2, volume):
+ try:
+ volumes = ec2.describe_volumes(
+ aws_retry=True,
+ VolumeIds=[volume],
+ )['Volumes']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe Volume")
+
+ if not volumes:
+ module.fail_json(
+ msg="Could not find volume with id {0}".format(volume)
+ )
+
+ volume = volumes[0]
+ return volume
+
+
+@AWSRetry.jittered_backoff()
+def _describe_snapshots(ec2, **params):
+ paginator = ec2.get_paginator('describe_snapshots')
+ return paginator.paginate(**params).build_full_result()
+
+
+# Handle SnapshotCreationPerVolumeRateExceeded separately because we need a much
+# longer delay than normal
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['SnapshotCreationPerVolumeRateExceeded'], delay=15)
+def _create_snapshot(ec2, **params):
+ # Fast retry on common failures ('global' rate limits)
+ return ec2.create_snapshot(aws_retry=True, **params)
+
+
+def get_snapshots_by_volume(module, ec2, volume_id):
+ _filter = {'volume-id': volume_id}
+ try:
+ results = _describe_snapshots(
+ ec2,
+ Filters=ansible_dict_to_boto3_filter_list(_filter)
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe snapshots from volume")
+
+ return results['Snapshots']
+
+
+def create_snapshot(module, ec2, description=None, wait=None,
+ wait_timeout=None, volume_id=None, instance_id=None,
+ snapshot_id=None, device_name=None, snapshot_tags=None,
+ last_snapshot_min_age=None):
+ snapshot = None
+ changed = False
+
+ if instance_id:
+ volume = get_volume_by_instance(
+ module, ec2, device_name, instance_id
+ )
+ volume_id = volume['VolumeId']
+ else:
+ volume = get_volume_by_id(module, ec2, volume_id)
+ if 'Tags' not in volume:
+ volume['Tags'] = []
+ if last_snapshot_min_age > 0:
+ current_snapshots = get_snapshots_by_volume(module, ec2, volume_id)
+ last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
+ snapshot = _get_most_recent_snapshot(
+ current_snapshots,
+ max_snapshot_age_secs=last_snapshot_min_age
+ )
+ # Create a new snapshot if we didn't find an existing one to use
+ if snapshot is None:
+ volume_tags = boto3_tag_list_to_ansible_dict(volume['Tags'])
+ volume_name = volume_tags.get('Name')
+ _tags = dict()
+ if volume_name:
+ _tags['Name'] = volume_name
+ if snapshot_tags:
+ _tags.update(snapshot_tags)
+
+ params = {'VolumeId': volume_id}
+ if description:
+ params['Description'] = description
+ if _tags:
+ params['TagSpecifications'] = [{
+ 'ResourceType': 'snapshot',
+ 'Tags': ansible_dict_to_boto3_tag_list(_tags),
+ }]
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have created a snapshot if not in check mode',
+ volume_id=volume['VolumeId'], volume_size=volume['Size'])
+ snapshot = _create_snapshot(ec2, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create snapshot")
+ changed = True
+ if wait:
+ waiter = get_waiter(ec2, 'snapshot_completed')
+ try:
+ waiter.wait(
+ SnapshotIds=[snapshot['SnapshotId']],
+ WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timed out while creating snapshot')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e, msg='Error while waiting for snapshot creation'
+ )
+
+ _tags = boto3_tag_list_to_ansible_dict(snapshot.get('Tags', []))
+ _snapshot = camel_dict_to_snake_dict(snapshot)
+ _snapshot['tags'] = _tags
+ results = {
+ 'snapshot_id': snapshot['SnapshotId'],
+ 'volume_id': snapshot['VolumeId'],
+ 'volume_size': snapshot['VolumeSize'],
+ 'tags': _tags,
+ 'snapshots': [_snapshot],
+ }
+
+ module.exit_json(changed=changed, **results)
+
+
+def delete_snapshot(module, ec2, snapshot_id):
+ if module.check_mode:
+ try:
+ _describe_snapshots(ec2, SnapshotIds=[snapshot_id])
+ module.exit_json(changed=True, msg='Would have deleted snapshot if not in check mode')
+ except is_boto3_error_code('InvalidSnapshot.NotFound'):
+ module.exit_json(changed=False, msg='Invalid snapshot ID - snapshot not found')
+ try:
+ ec2.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id)
+ except is_boto3_error_code('InvalidSnapshot.NotFound'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete snapshot")
+
+ # successful delete
+ module.exit_json(changed=True)
+
+
+def create_snapshot_ansible_module():
+ argument_spec = dict(
+ volume_id=dict(),
+ description=dict(),
+ instance_id=dict(),
+ snapshot_id=dict(),
+ device_name=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ last_snapshot_min_age=dict(type='int', default=0),
+ snapshot_tags=dict(type='dict', default=dict()),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ mutually_exclusive = [
+ ('instance_id', 'snapshot_id', 'volume_id'),
+ ]
+ required_if = [
+ ('state', 'absent', ('snapshot_id',)),
+ ]
+ required_one_of = [
+ ('instance_id', 'snapshot_id', 'volume_id'),
+ ]
+ required_together = [
+ ('instance_id', 'device_name'),
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ required_one_of=required_one_of,
+ required_together=required_together,
+ supports_check_mode=True,
+ )
+
+ return module
+
+
+def main():
+ module = create_snapshot_ansible_module()
+
+ volume_id = module.params.get('volume_id')
+ snapshot_id = module.params.get('snapshot_id')
+ description = module.params.get('description')
+ instance_id = module.params.get('instance_id')
+ device_name = module.params.get('device_name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ last_snapshot_min_age = module.params.get('last_snapshot_min_age')
+ snapshot_tags = module.params.get('snapshot_tags')
+ state = module.params.get('state')
+
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ if state == 'absent':
+ delete_snapshot(
+ module=module,
+ ec2=ec2,
+ snapshot_id=snapshot_id,
+ )
+ else:
+ create_snapshot(
+ module=module,
+ description=description,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ ec2=ec2,
+ volume_id=volume_id,
+ instance_id=instance_id,
+ snapshot_id=snapshot_id,
+ device_name=device_name,
+ snapshot_tags=snapshot_tags,
+ last_snapshot_min_age=last_snapshot_min_age,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py
new file mode 100644
index 00000000..2b7b5115
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_snapshot_info
+version_added: 1.0.0
+short_description: Gathers information about EC2 volume snapshots in AWS
+description:
+ - Gathers information about EC2 volume snapshots in AWS.
+author:
+ - Rob White (@wimnat)
+ - Aubin Bikouo (@abikouo)
+options:
+ snapshot_ids:
+ description:
+ - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ owner_ids:
+ description:
+ - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
+ access are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ restorable_by_user_ids:
+ description:
+ - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
+ returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ type: dict
+ default: {}
+ max_results:
+ description:
+ - The maximum number of snapshot results returned in paginated output.
+ - When used, only a single page along with a C(next_token_id) response element will be returned.
+ - The remaining results of the initial request can be seen by sending another request with the returned C(next_token_id) value.
+ - This value can be between 5 and 1000; if I(max_results) is given a value larger than 1000, only 1000 results are returned.
+ - If this parameter is not used, then DescribeSnapshots returns all results.
+ - This parameter is mutually exclusive with I(snapshot_ids).
+ required: False
+ type: int
+ next_token_id:
+ description:
+ - Contains the value returned from a previous paginated request where I(max_results) was used and the results exceeded the value of that parameter.
+ - Pagination continues from the end of the previous results that returned the I(next_token_id) value.
+ - This parameter is mutually exclusive with I(snapshot_ids).
+ required: false
+ type: str
+notes:
+ - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
+ the account use the filter 'owner-id'.
+
+extends_documentation_fragment:
+ - amazon.aws.ec2
+ - amazon.aws.aws
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all snapshots, including public ones
+- amazon.aws.ec2_snapshot_info:
+
+# Gather information about all snapshots owned by the account 123456789012
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ owner-id: 123456789012
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ owner_ids:
+ - 123456789012
+
+# Gather information about a particular snapshot using ID
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ snapshot-id: snap-00112233
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ snapshot_ids:
+ - snap-00112233
+
+# Gather information about any snapshot with a tag key Name and value Example
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any snapshot with an error status
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ status: error
+
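+# Paginate with max_results and next_token_id (a sketch; the token comes
+# from a previously registered run)
+- amazon.aws.ec2_snapshot_info:
+ max_results: 100
+ register: first_page
+
+- amazon.aws.ec2_snapshot_info:
+ max_results: 100
+ next_token_id: "{{ first_page.next_token_id }}"
+ when: first_page.next_token_id is defined
+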
+'''
+
+RETURN = r'''
+snapshots:
+ description: List of snapshots retrieved with their respective info.
+ type: list
+ returned: success
+ elements: dict
+ contains:
+ snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+ volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+ state:
+ description: The snapshot state (completed, pending or error).
+ type: str
+ returned: always
+ sample: completed
+ state_message:
+ description:
+ - Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
+ AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
+ error occurred.
+ type: str
+ returned: always
+ sample:
+ start_time:
+ description: The time stamp when the snapshot was initiated.
+ type: str
+ returned: always
+ sample: "2015-02-12T02:14:02+00:00"
+ progress:
+ description: The progress of the snapshot, as a percentage.
+ type: str
+ returned: always
+ sample: "100%"
+ owner_id:
+ description: The AWS account ID of the EBS snapshot owner.
+ type: str
+ returned: always
+ sample: "123456789012"
+ description:
+ description: The description for the snapshot.
+ type: str
+ returned: always
+ sample: "My important backup"
+ volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+ owner_alias:
+ description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
+ type: str
+ returned: always
+ sample: "123456789012"
+ tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+ encrypted:
+ description: Indicates whether the snapshot is encrypted.
+ type: bool
+ returned: always
+ sample: "True"
+ kms_key_id:
+ description:
+ - The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to
+ protect the volume encryption key for the parent volume.
+ type: str
+ returned: always
+ sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
+ data_encryption_key_id:
+ description:
+ - The data encryption key identifier for the snapshot. This value is a unique identifier that
+ corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
+ type: str
+ returned: always
+ sample: "arn:aws:kms:ap-southeast-2:123456789012:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
+next_token_id:
+ description:
+ - Contains the value returned from a previous paginated request where C(max_results) was used and the results exceeded the value of that parameter.
+ - This value is null when there are no more results to return.
+ type: str
+ returned: when option C(max_results) is set in input
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_snapshots(connection, module):
+
+ snapshot_ids = module.params.get("snapshot_ids")
+ owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
+ restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ max_results = module.params.get('max_results')
+ next_token = module.params.get('next_token_id')
+ optional_param = {}
+ if max_results:
+ optional_param['MaxResults'] = max_results
+ if next_token:
+ optional_param['NextToken'] = next_token
+
+ try:
+ snapshots = connection.describe_snapshots(
+ aws_retry=True,
+ SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
+ RestorableByUserIds=restorable_by_user_ids, Filters=filters,
+ **optional_param)
+ except is_boto3_error_code('InvalidSnapshot.NotFound') as e:
+ if len(snapshot_ids) > 1:
+ module.warn("Some of your snapshots may exist, but %s" % str(e))
+ snapshots = {'Snapshots': []}
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe snapshots')
+
+ result = {}
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_snapshots = []
+ for snapshot in snapshots['Snapshots']:
+ snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for snapshot in snaked_snapshots:
+ if 'tags' in snapshot:
+ snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
+
+ result['snapshots'] = snaked_snapshots
+
+ if snapshots.get('NextToken'):
+ result.update(camel_dict_to_snake_dict({'NextTokenId': snapshots.get('NextToken')}))
+
+ module.exit_json(**result)
+
+
+def main():
+
+ argument_spec = dict(
+ snapshot_ids=dict(default=[], type='list', elements='str'),
+ owner_ids=dict(default=[], type='list', elements='str'),
+ restorable_by_user_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict'),
+ max_results=dict(type='int'),
+ next_token_id=dict(type='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'],
+ ['snapshot_ids', 'max_results'],
+ ['snapshot_ids', 'next_token_id']
+ ],
+ supports_check_mode=True
+ )
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_snapshots(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py
new file mode 100644
index 00000000..925b2db1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_spot_instance
+version_added: 2.0.0
+short_description: Request, stop, reboot or cancel spot instance
+description:
+ - Creates or cancels spot instance requests.
+author:
+ - Sri Rachana Achyuthuni (@srirachanaachyuthuni)
+options:
+ zone_group:
+ description:
+ - Name for logical grouping of spot requests.
+ - All spot instances in the request are launched in the same availability zone.
+ type: str
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ type: str
+ count:
+ description:
+ - Number of instances to launch.
+ default: 1
+ type: int
+ interruption:
+ description:
+ - The behavior when a Spot Instance is interrupted.
+ choices: [ "hibernate", "stop", "terminate" ]
+ type: str
+ default: terminate
+ launch_group:
+ description:
+ - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
+ type: str
+ launch_specification:
+ description:
+ - The launch specification.
+ type: dict
+ suboptions:
+ security_group_ids:
+ description:
+ - Security group id (or list of ids) to use with the instance.
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - Security group name (or list of group names) to use with the instance.
+ - Only supported with EC2 Classic. To launch in a VPC, use I(security_group_ids) instead.
+ type: list
+ elements: str
+ key_name:
+ description:
+ - Key to use on the instance.
+ - The SSH key must already exist in AWS in order to use this argument.
+ - Keys can be created / deleted using the M(amazon.aws.ec2_key) module.
+ type: str
+ subnet_id:
+ description:
+ - The ID of the subnet in which to launch the instance.
+ type: str
+ user_data:
+ description:
+ - The base64-encoded user data for the instance. User data is limited to 16 KB.
+ type: str
+ block_device_mappings:
+ description:
+ - A list of hash/dictionaries of volumes to add to the new instance.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ description:
+ - The device name (for example, /dev/sdh or xvdh).
+ type: str
+ virtual_name:
+ description:
+ - The virtual device name.
+ type: str
+ ebs:
+ description:
+ - Parameters used to automatically set up EBS volumes when the instance is launched,
+ see U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.request_spot_instances)
+ type: dict
+ no_device:
+ description:
+ - To omit the device from the block device mapping, specify an empty string.
+ type: str
+ ebs_optimized:
+ description:
+ - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ default: false
+ type: bool
+ iam_instance_profile:
+ description:
+ - The IAM instance profile.
+ type: dict
+ suboptions:
+ arn:
+ description:
+ - The Amazon Resource Name (ARN) of the instance profile.
+ - Only one of I(arn) or I(name) may be specified.
+ type: str
+ name:
+ description:
+ - The name of the instance profile.
+ - Only one of I(arn) or I(name) may be specified.
+ type: str
+ image_id:
+ description:
+ - The ID of the AMI.
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Required when creating a new instance.
+ type: str
+ kernel_id:
+ description:
+ - The ID of the kernel.
+ type: str
+ network_interfaces:
+ description:
+ - One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.
+ type: list
+ elements: dict
+ suboptions:
+ associate_public_ip_address:
+ description:
+ - Indicates whether to assign a public IPv4 address to an instance you launch in a VPC.
+ type: bool
+ delete_on_termination:
+ description:
+ - If set to true, the interface is deleted when the instance is terminated.
+ You can specify true only if creating a new network interface when launching an instance.
+ type: bool
+ description:
+ description:
+ - The description of the network interface. Applies only if creating a network interface when launching an instance.
+ type: str
+ device_index:
+ description:
+ - The position of the network interface in the attachment order. A primary network interface has a device index of 0.
+ - If you specify a network interface when launching an instance, you must specify the device index.
+ type: int
+ groups:
+ description:
+ - The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.
+ type: list
+ elements: str
+ ipv6_address_count:
+ description:
+ - The number of IPv6 addresses to assign to the network interface.
+ type: int
+ ipv6_addresses:
+ description:
+ - One or more IPv6 addresses to assign to the network interface.
+ type: list
+ elements: dict
+ suboptions:
+ ipv6address:
+ description: The IPv6 address.
+ type: str
+ network_interface_id:
+ description:
+ - The ID of the network interface.
+ type: str
+ private_ip_address:
+ description:
+ - The private IPv4 address of the network interface.
+ type: str
+ private_ip_addresses:
+ description:
+ - One or more private IPv4 addresses to assign to the network interface.
+ type: list
+ elements: dict
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary private IPv4 addresses.
+ type: int
+ subnet_id:
+ description:
+ - The ID of the subnet associated with the network interface.
+ type: str
+ associate_carrier_ip_address:
+ description:
+ - Indicates whether to assign a carrier IP address to the network interface.
+ type: bool
+ interface_type:
+ description:
+ - The type of network interface.
+ type: str
+ choices: ['interface', 'efa']
+ network_card_index:
+ description:
+ - The index of the network card.
+ type: int
+ ipv4_prefixes:
+ description:
+ - One or more IPv4 delegated prefixes to be assigned to the network interface.
+ type: list
+ elements: dict
+ ipv4_prefix_count:
+ description:
+ - The number of IPv4 delegated prefixes to be automatically assigned to the network interface.
+ type: int
+ ipv6_prefixes:
+ description:
+ - One or more IPv6 delegated prefixes to be assigned to the network interface.
+ type: list
+ elements: dict
+ ipv6_prefix_count:
+ description:
+ - The number of IPv6 delegated prefixes to be automatically assigned to the network interface.
+ type: int
+ placement:
+ description:
+ - The placement information for the instance.
+ type: dict
+ suboptions:
+ availability_zone:
+ description:
+ - The Availability Zone.
+ type: str
+ group_name:
+ description:
+ - The name of the placement group.
+ type: str
+ tenancy:
+ description:
+ - The tenancy of the host.
+ type: str
+ choices: ['default', 'dedicated', 'host']
+ default: default
+ ramdisk_id:
+ description:
+ - The ID of the RAM disk.
+ type: str
+ monitoring:
+ description:
+ - Indicates whether basic or detailed monitoring is enabled for the instance.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ type: bool
+ default: false
+ state:
+ description:
+ - Whether the spot request should be created or removed.
+ - When I(state=present), I(launch_specification) is required.
+ - When I(state=absent), I(spot_instance_request_ids) is required.
+ default: 'present'
+ choices: [ 'absent', 'present' ]
+ type: str
+ spot_price:
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+ - A spot request is made with this maximum bid. When it is filled, the instance is started.
+ type: str
+ spot_type:
+ description:
+ - The type of spot request.
+ - After being interrupted, a C(persistent) spot instance will be started once there is capacity to fill the request again.
+ default: 'one-time'
+ choices: [ "one-time", "persistent" ]
+ type: str
+ tags:
+ description:
+ - A dictionary of key-value pairs for tagging the Spot Instance request on creation.
+ type: dict
+ spot_instance_request_ids:
+ description:
+ - List of strings with IDs of spot requests to be cancelled.
+ default: []
+ type: list
+ elements: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Simple Spot Request Creation
+ amazon.aws.ec2_spot_instance:
+ launch_specification:
+ image_id: ami-123456789
+ key_name: my-keypair
+ instance_type: t2.medium
+
+- name: Spot Request Creation with more options
+ amazon.aws.ec2_spot_instance:
+ launch_specification:
+ image_id: ami-123456789
+ key_name: my-keypair
+ instance_type: t2.medium
+ subnet_id: subnet-12345678
+ block_device_mappings:
+ - device_name: /dev/sdb
+ ebs:
+ delete_on_termination: True
+ volume_type: gp3
+ volume_size: 5
+ - device_name: /dev/sdc
+ ebs:
+ delete_on_termination: True
+ volume_type: io2
+ volume_size: 30
+ network_interfaces:
+ - associate_public_ip_address: False
+ delete_on_termination: True
+ device_index: 0
+ placement:
+ availability_zone: us-west-2a
+ monitoring:
+ enabled: False
+ spot_price: 0.002
+ tags:
+ Environment: Testing
+
+- name: Spot Request Termination
+ amazon.aws.ec2_spot_instance:
+ spot_instance_request_ids: ['sir-12345678', 'sir-abcdefgh']
+ state: absent
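+
+# Register the request and cancel it later by ID (a sketch; values are placeholders)
+- name: Spot Request Creation with registration
+ amazon.aws.ec2_spot_instance:
+ launch_specification:
+ image_id: ami-123456789
+ key_name: my-keypair
+ instance_type: t2.medium
+ register: spot_result
+
+- name: Cancel the registered Spot Request
+ amazon.aws.ec2_spot_instance:
+ spot_instance_request_ids:
+ - "{{ spot_result.spot_request.spot_instance_request_id }}"
+ state: absent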
+'''
+
+RETURN = '''
+spot_request:
+ description: The spot instance request details after creation.
+ returned: when success
+ type: dict
+ sample: {
+ "create_time": "2021-08-23T22:59:12+00:00",
+ "instance_interruption_behavior": "terminate",
+ "launch_specification": {
+ "block_device_mappings": [
+ {
+ "device_name": "/dev/sdb",
+ "ebs": {
+ "delete_on_termination": true,
+ "volume_size": 5,
+ "volume_type": "gp3"
+ }
+ }
+ ],
+ "ebs_optimized": false,
+ "iam_instance_profile": {
+ "arn": "arn:aws:iam::EXAMPLE:instance-profile/myinstanceprofile"
+ },
+ "image_id": "ami-083ac7c7ecf9bb9b0",
+ "instance_type": "t2.small",
+ "key_name": "mykey",
+ "monitoring": {
+ "enabled": false
+ },
+ "network_interfaces": [
+ {
+ "associate_public_ip_address": false,
+ "delete_on_termination": true,
+ "device_index": 0
+ }
+ ],
+ "placement": {
+ "availability_zone": "us-west-2a",
+ "tenancy": "default"
+ },
+ "security_groups": [
+ {
+ "group_name": "default"
+ }
+ ]
+ },
+ "product_description": "Linux/UNIX",
+ "spot_instance_request_id": "sir-1234abcd",
+ "spot_price": "0.00600",
+ "state": "open",
+ "status": {
+ "code": "pending-evaluation",
+ "message": "Your Spot request has been submitted for review, and is pending evaluation.",
+ "update_time": "2021-08-23T22:59:12+00:00"
+ },
+ "type": "one-time"
+
+ }
+
+cancelled_spot_request:
+ description: A message confirming which spot instance request IDs have been cancelled.
+ returned: always
+ type: str
+ sample: 'Spot requests with IDs: sir-1234abcd have been cancelled'
+'''
+# TODO: add support for datetime-based parameters
+# import datetime
+# import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def build_launch_specification(launch_spec):
+ """
+ Remove keys that have a value of None from Launch Specification
+ Descend into these subkeys:
+ network_interfaces
+ block_device_mappings
+ monitoring
+ placement
+ iam_instance_profile
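+
+    For example (illustrative values only), an input of
+        {'image_id': 'ami-123456789', 'key_name': None,
+         'monitoring': {'enabled': False}, 'placement': None,
+         'iam_instance_profile': None, 'network_interfaces': None,
+         'block_device_mappings': None}
+    is converted (snake_case keys to CamelCase) to
+        {'ImageId': 'ami-123456789', 'Monitoring': {'Enabled': False}}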
+ """
+ assigned_keys = dict((k, v) for k, v in launch_spec.items() if v is not None)
+
+ sub_key_to_build = ['placement', 'iam_instance_profile', 'monitoring']
+ for subkey in sub_key_to_build:
+ if launch_spec[subkey] is not None:
+ assigned_keys[subkey] = dict((k, v) for k, v in launch_spec[subkey].items() if v is not None)
+
+ if launch_spec['network_interfaces'] is not None:
+ interfaces = []
+ for iface in launch_spec['network_interfaces']:
+ interfaces.append(dict((k, v) for k, v in iface.items() if v is not None))
+ assigned_keys['network_interfaces'] = interfaces
+
+ if launch_spec['block_device_mappings'] is not None:
+ block_devs = []
+ for dev in launch_spec['block_device_mappings']:
+ block_devs.append(
+ dict((k, v) for k, v in dev.items() if v is not None))
+ assigned_keys['block_device_mappings'] = block_devs
+
+ return snake_dict_to_camel_dict(assigned_keys, capitalize_first=True)
+
+
+def request_spot_instances(module, connection):
+
+ # connection.request_spot_instances() always creates a new spot request
+ changed = True
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ params = {}
+
+ if module.params.get('launch_specification'):
+ params['LaunchSpecification'] = build_launch_specification(module.params.get('launch_specification'))
+
+ if module.params.get('zone_group'):
+ params['AvailabilityZoneGroup'] = module.params.get('zone_group')
+
+ if module.params.get('count'):
+ params['InstanceCount'] = module.params.get('count')
+
+ if module.params.get('launch_group'):
+ params['LaunchGroup'] = module.params.get('launch_group')
+
+ if module.params.get('spot_price'):
+ params['SpotPrice'] = module.params.get('spot_price')
+
+ if module.params.get('spot_type'):
+ params['Type'] = module.params.get('spot_type')
+
+ if module.params.get('client_token'):
+ params['ClientToken'] = module.params.get('client_token')
+
+ if module.params.get('interruption'):
+ params['InstanceInterruptionBehavior'] = module.params.get('interruption')
+
+ if module.params.get('tags'):
+ params['TagSpecifications'] = [{
+ 'ResourceType': 'spot-instances-request',
+ 'Tags': ansible_dict_to_boto3_tag_list(module.params.get('tags')),
+ }]
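+        # With tags={'Environment': 'Testing'} (illustrative), this yields
+        # [{'ResourceType': 'spot-instances-request',
+        #   'Tags': [{'Key': 'Environment', 'Value': 'Testing'}]}]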
+
+ # TODO: add support for datetime-based parameters
+ # params['ValidFrom'] = module.params.get('valid_from')
+ # params['ValidUntil'] = module.params.get('valid_until')
+
+ try:
+ request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))['SpotInstanceRequests'][0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while creating the spot instance request')
+
+ request_spot_instance_response['Tags'] = boto3_tag_list_to_ansible_dict(request_spot_instance_response.get('Tags', []))
+ spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=['Tags'])
+ module.exit_json(spot_request=spot_request, changed=changed)
+
+
+def cancel_spot_instance_requests(module, connection):
+
+ changed = False
+ spot_instance_request_ids = module.params.get('spot_instance_request_ids')
+ requests_exist = dict()
+ try:
+ paginator = connection.get_paginator('describe_spot_instance_requests').paginate(SpotInstanceRequestIds=spot_instance_request_ids,
+ Filters=[{'Name': 'state', 'Values': ['open', 'active']}])
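+        # Paginators do not accept aws_retry=True, so wrap build_full_result
+        # in the retry decorator instead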
+ jittered_retry = AWSRetry.jittered_backoff()
+ requests_exist = jittered_retry(paginator.build_full_result)()
+ except is_boto3_error_code('InvalidSpotInstanceRequestID.NotFound'):
+ requests_exist['SpotInstanceRequests'] = []
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failure when describing spot requests")
+
+ try:
+ if len(requests_exist['SpotInstanceRequests']) > 0:
+ changed = True
+ if module.check_mode:
+ module.exit_json(changed=changed,
+ msg='Would have cancelled Spot request {0}'.format(spot_instance_request_ids))
+
+ connection.cancel_spot_instance_requests(aws_retry=True, SpotInstanceRequestIds=module.params.get('spot_instance_request_ids'))
+ module.exit_json(changed=changed, msg='Cancelled Spot request {0}'.format(module.params.get('spot_instance_request_ids')))
+ else:
+ module.exit_json(changed=changed, msg='Spot request not found or already cancelled')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while cancelling the spot instance request')
+
+
+def main():
+ network_interface_options = dict(
+ associate_public_ip_address=dict(type='bool'),
+ delete_on_termination=dict(type='bool'),
+ description=dict(type='str'),
+ device_index=dict(type='int'),
+ groups=dict(type='list', elements='str'),
+ ipv6_address_count=dict(type='int'),
+ ipv6_addresses=dict(type='list', elements='dict', options=dict(ipv6address=dict(type='str'))),
+ network_interface_id=dict(type='str'),
+ private_ip_address=dict(type='str'),
+ private_ip_addresses=dict(type='list', elements='dict'),
+ secondary_private_ip_address_count=dict(type='int'),
+ subnet_id=dict(type='str'),
+ associate_carrier_ip_address=dict(type='bool'),
+ interface_type=dict(type='str', choices=['interface', 'efa']),
+ network_card_index=dict(type='int'),
+ ipv4_prefixes=dict(type='list', elements='dict'),
+ ipv4_prefix_count=dict(type='int'),
+ ipv6_prefixes=dict(type='list', elements='dict'),
+ ipv6_prefix_count=dict(type='int')
+ )
+ block_device_mappings_options = dict(
+ device_name=dict(type='str'),
+ virtual_name=dict(type='str'),
+ ebs=dict(type='dict'),
+ no_device=dict(type='str'),
+ )
+ monitoring_options = dict(
+ enabled=dict(type='bool', default=False)
+ )
+ placement_options = dict(
+ availability_zone=dict(type='str'),
+ group_name=dict(type='str'),
+ tenancy=dict(type='str', choices=['default', 'dedicated', 'host'], default='default')
+ )
+ iam_instance_profile_options = dict(
+ arn=dict(type='str'),
+ name=dict(type='str')
+ )
+ launch_specification_options = dict(
+ security_group_ids=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ block_device_mappings=dict(type='list', elements='dict', options=block_device_mappings_options),
+ ebs_optimized=dict(type='bool', default=False),
+ iam_instance_profile=dict(type='dict', options=iam_instance_profile_options),
+ image_id=dict(type='str'),
+ instance_type=dict(type='str'),
+ kernel_id=dict(type='str'),
+ key_name=dict(type='str'),
+ monitoring=dict(type='dict', options=monitoring_options),
+ network_interfaces=dict(type='list', elements='dict', options=network_interface_options, default=[]),
+ placement=dict(type='dict', options=placement_options),
+ ramdisk_id=dict(type='str'),
+ user_data=dict(type='str'),
+ subnet_id=dict(type='str')
+ )
+
+ argument_spec = dict(
+ zone_group=dict(type='str'),
+ client_token=dict(type='str', no_log=False),
+ count=dict(type='int', default=1),
+ interruption=dict(type='str', default="terminate", choices=['hibernate', 'stop', 'terminate']),
+ launch_group=dict(type='str'),
+ launch_specification=dict(type='dict', options=launch_specification_options),
+ state=dict(default='present', choices=['present', 'absent']),
+ spot_price=dict(type='str'),
+ spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+ tags=dict(type='dict'),
+ # valid_from=dict(type='datetime', default=datetime.datetime.now()),
+ # valid_until=dict(type='datetime', default=(datetime.datetime.now() + datetime.timedelta(minutes=60))
+ spot_instance_request_ids=dict(type='list', elements='str'),
+ )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        # Enforce the requirements stated in the DOCUMENTATION block
+        required_if=[
+            ('state', 'present', ['launch_specification']),
+            ('state', 'absent', ['spot_instance_request_ids']),
+        ],
+    )
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params['state']
+
+ if state == 'present':
+ request_spot_instances(module, connection)
+
+ if state == 'absent':
+ cancel_spot_instance_requests(module, connection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py
new file mode 100644
index 00000000..e45fcb47
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_spot_instance_info
+version_added: 2.0.0
+short_description: Gather information about ec2 spot instance requests
+description:
+ - Describes the specified Spot Instance requests.
+author:
+ - Mandar Vijay Kulkarni (@mandar242)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - Filter names and values are case sensitive.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotInstanceRequests.html) for possible filters.
+ required: false
+ default: {}
+ type: dict
+ spot_instance_request_ids:
+ description:
+ - One or more Spot Instance request IDs.
+ required: false
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: describe the Spot Instance requests based on request IDs
+ amazon.aws.ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - sir-12345678
+
+- name: describe the Spot Instance requests and filter results based on instance type
+ amazon.aws.ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - sir-12345678
+ - sir-13579246
+ - sir-87654321
+ filters:
+ launch.instance-type: t3.medium
+
+- name: describe the Spot requests filtered using multiple filters
+ amazon.aws.ec2_spot_instance_info:
+ filters:
+ state: active
+ launch.block-device-mapping.device-name: /dev/sdb
+
+'''
+
+RETURN = '''
+spot_request:
+ description: The gathered information about specified spot instance requests.
+ returned: when success
+ type: list
+ elements: dict
+ contains:
+ create_time:
+ description: The date and time when the Spot Instance request was created.
+ returned: always
+ type: str
+ instance_id:
+ description: The instance ID, if an instance has been launched to fulfill the Spot Instance request.
+ returned: when instance exists
+ type: str
+ instance_interruption_behavior:
+      description: The behavior when a Spot Instance is interrupted.
+ returned: always
+ type: str
+ launch_specification:
+ description: Additional information for launching instances.
+ returned: always
+ type: dict
+ contains:
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ instance_type:
+ description: The instance type.
+ returned: always
+ type: str
+ key_name:
+ description: The name of the key pair.
+ returned: always
+ type: str
+ monitoring:
+          description: Describes the monitoring of an instance.
+ returned: always
+ type: dict
+ contains:
+ enabled:
+              description: Indicates whether detailed monitoring is enabled.
+ returned: always
+ type: bool
+ placement:
+ description: The placement information for the instance.
+ returned: always
+ type: dict
+ contains:
+ availability_zone:
+ description: The name of the availability zone.
+ returned: always
+ type: str
+ security_groups:
+ description: List of security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ subnet_id:
+ description: The ID of the subnet.
+          returned: when a network interface is created at instance launch
+ type: str
+ launched_availability_zone:
+ description: The availability zone in which the request is launched.
+ returned: always
+ type: str
+ product_description:
+ description: The product description associated with the Spot Instance.
+ returned: always
+ type: str
+ spot_instance_request_id:
+ description: The ID of the Spot Instance request.
+ returned: always
+ type: str
+ spot_price:
+ description: The maximum price per hour that you are willing to pay for a Spot Instance.
+ returned: always
+ type: str
+ state:
+ description: The state of the Spot Instance request.
+ returned: always
+ type: str
+ status:
+ description: Extra information about the status of the Spot Instance request.
+ returned: always
+ type: dict
+ contains:
+ code:
+ description:
+ - The status code.
+            - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-request-status.html#spot-instance-request-status-understand) for codes.
+ returned: always
+ type: str
+ message:
+ description: The description of the status code.
+ returned: always
+ type: str
+ update_time:
+ description: The date and time of the most recent status update in UTC format.
+ returned: always
+ type: str
+ tags:
+ description: List of tags associated with the resource.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The key of the tag.
+ returned: always
+ type: str
+ value:
+ description: The value of the tag.
+ returned: always
+ type: str
+ type:
+ description: The Spot Instance request type.
+ returned: always
+ type: str
+ valid_until:
+ description: The end date of the request in UTC format.
+ returned: always
+ type: str
+ sample: {
+ "create_time": "2021-09-01T21:05:57+00:00",
+ "instance_id": "i-08877936b801ac475",
+ "instance_interruption_behavior": "terminate",
+ "launch_specification": {
+ "ebs_optimized": false,
+ "image_id": "ami-0443305dabd4be2bc",
+ "instance_type": "t2.medium",
+ "key_name": "zuul",
+ "monitoring": {
+ "enabled": false
+ },
+ "placement": {
+ "availability_zone": "us-east-2b"
+ },
+ "security_groups": [
+ {
+ "group_id": "sg-01f9833207d53b937",
+ "group_name": "default"
+ }
+ ],
+ "subnet_id": "subnet-07d906b8358869bda"
+ },
+ "launched_availability_zone": "us-east-2b",
+ "product_description": "Linux/UNIX",
+ "spot_instance_request_id": "sir-c3cp9jsk",
+ "spot_price": "0.046400",
+ "state": "active",
+ "status": {
+ "code": "fulfilled",
+ "message": "Your spot request is fulfilled.",
+ "update_time": "2021-09-01T21:05:59+00:00"
+ },
+ "tags": {},
+ "type": "one-time",
+ "valid_until": "2021-09-08T21:05:57+00:00"
+ }
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def _describe_spot_instance_requests(connection, **params):
+ paginator = connection.get_paginator('describe_spot_instance_requests')
+ return paginator.paginate(**params).build_full_result()
+
+
+def describe_spot_instance_requests(connection, module):
+
+ params = {}
+
+ if module.params.get('filters'):
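+        # ansible_dict_to_boto3_filter_list converts e.g. (illustrative)
+        # {'state': 'active'} into [{'Name': 'state', 'Values': ['active']}]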
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('spot_instance_request_ids'):
+ params['SpotInstanceRequestIds'] = module.params.get('spot_instance_request_ids')
+
+ try:
+ describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)['SpotInstanceRequests']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe spot instance requests')
+
+ spot_request = []
+ for response_list_item in describe_spot_instance_requests_response:
+ spot_request.append(camel_dict_to_snake_dict(response_list_item))
+
+ if len(spot_request) == 0:
+        module.exit_json(spot_request=[], msg='No spot requests found for specified options')
+
+ module.exit_json(spot_request=spot_request)
+
+
+def main():
+
+ argument_spec = dict(
+ filters=dict(default={}, type='dict'),
+ spot_instance_request_ids=dict(default=[], type='list', elements='str'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ try:
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ describe_spot_instance_requests(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py
new file mode 100644
index 00000000..6ccf687e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_tag
+version_added: 1.0.0
+short_description: Create and remove tags on ec2 resources
+description:
+ - Creates, modifies and removes tags for any EC2 resource.
+ - Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX).
+ - This module is designed to be used with complex args (tags), see the examples.
+options:
+ resource:
+ description:
+ - The EC2 resource id.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the tags should be present or absent on the resource.
+ - The use of I(state=list) to interrogate the tags of an instance was
+ deprecated in release 1.0.0 and is no longer available in release 4.0.0.
+ The 'list' functionality has been moved to a dedicated module
+ M(amazon.aws.ec2_tag_info).
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+      - If no value is provided for a key when I(state=absent), the tag will be removed regardless of its current value.
+ type: dict
+ required: true
+ purge_tags:
+ description:
+ - Whether unspecified tags should be removed from the resource.
+ - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
+ type: bool
+ default: false
+
+author:
+ - Lester Wade (@lwade)
+ - Paul Arthur (@flowerysong)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Ensure tags are present on a resource
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: vol-XXXXXX
+ state: present
+ tags:
+ Name: ubervol
+ env: prod
+
+- name: Ensure all volumes are tagged
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: '{{ item.id }}'
+ state: present
+ tags:
+ Name: dbserver
+ Env: production
+ loop: '{{ ec2_vol.volumes }}'
+
+- name: Remove the Env tag
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Env:
+ state: absent
+
+- name: Remove the Env tag if it's currently 'development'
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Env: development
+ state: absent
+
+- name: Remove all tags except for Name from an instance
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Name: ''
+ state: absent
+ purge_tags: true
+'''
+
+RETURN = '''
+tags:
+ description: A dict containing the tags on the resource
+ returned: always
+ type: dict
+added_tags:
+ description: A dict of tags that were added to the resource
+ returned: If tags were added
+ type: dict
+removed_tags:
+ description: A dict of tags that were removed from the resource
+ returned: If tags were removed
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import remove_ec2_tags
+
+
+def main():
+ argument_spec = dict(
+ resource=dict(required=True),
+ tags=dict(type='dict', required=True),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ resource = module.params['resource']
+ tags = module.params['tags']
+ state = module.params['state']
+ purge_tags = module.params['purge_tags']
+
+ result = {'changed': False}
+
+ ec2 = module.client('ec2')
+
+ current_tags = describe_ec2_tags(ec2, module, resource)
+
+ if state == 'absent':
+ removed_tags = {}
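+        # A tag requested with no value is removed regardless of its current
+        # value; otherwise the current value must match the requested one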
+ for key in tags:
+ if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
+ result['changed'] = True
+ removed_tags[key] = current_tags[key]
+ result['removed_tags'] = removed_tags
+ remove_ec2_tags(ec2, module, resource, removed_tags.keys())
+
+ if state == 'present':
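+        # compare_aws_tags returns the tags to set and the tag keys to unset,
+        # e.g. (illustrative) current={'Name': 'a', 'Env': 'dev'}, tags={'Env': 'prod'},
+        # purge_tags=True -> tags_to_set={'Env': 'prod'}, tags_to_unset=['Name']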
+ tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags)
+ if tags_to_unset:
+ result['removed_tags'] = {}
+ for key in tags_to_unset:
+ result['removed_tags'][key] = current_tags[key]
+ result['added_tags'] = tags_to_set
+ result['changed'] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags)
+
+ result['tags'] = describe_ec2_tags(ec2, module, resource)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py
new file mode 100644
index 00000000..6be53656
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_tag_info
+version_added: 1.0.0
+short_description: List tags on ec2 resources
+description:
+ - Lists tags for any EC2 resource.
+ - Resources are referenced by their resource id (e.g. an instance being i-XXXXXXX, a vpc being vpc-XXXXXX).
+ - Resource tags can be managed using the M(amazon.aws.ec2_tag) module.
+options:
+ resource:
+ description:
+ - The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX).
+ required: true
+ type: str
+
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Retrieve all tags on an instance
+ amazon.aws.ec2_tag_info:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ register: instance_tags
+
+- name: Retrieve all tags on a VPC
+ amazon.aws.ec2_tag_info:
+ region: eu-west-1
+ resource: vpc-xxxxxxxxxxxxxxxxx
+ register: vpc_tags
+'''
+
+RETURN = '''
+tags:
+ description: A dict containing the tags on the resource
+ returned: always
+ type: dict
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+
+
+def main():
+ argument_spec = dict(
+ resource=dict(required=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ resource = module.params['resource']
+ ec2 = module.client('ec2')
+
+ current_tags = describe_ec2_tags(ec2, module, resource)
+
+ module.exit_json(changed=False, tags=current_tags)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
new file mode 100644
index 00000000..adefaf73
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
@@ -0,0 +1,862 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol
+version_added: 1.0.0
+short_description: Create and attach a volume, return volume ID and device map
+description:
+ - Creates an EBS volume and optionally attaches it to an instance.
+ - If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made.
+options:
+ instance:
+ description:
+ - Instance ID if you wish to attach the volume.
+ - Set to C(None) to detach the volume.
+ type: str
+ name:
+ description:
+ - Volume Name tag if you wish to attach an existing volume (requires instance).
+ type: str
+ id:
+ description:
+ - Volume ID if you wish to attach an existing volume (requires instance) or remove an existing volume.
+ type: str
+ volume_size:
+ description:
+ - Size of volume (in GiB) to create.
+ type: int
+ volume_type:
+ description:
+ - Type of EBS volume; C(standard) (magnetic), C(gp2) (SSD), C(gp3) (SSD), C(io1) (Provisioned IOPS), C(io2) (Provisioned IOPS),
+ C(st1) (Throughput Optimized HDD), C(sc1) (Cold HDD).
+      - C(standard) is the old EBS default and remains the Ansible default for backwards compatibility.
+ default: standard
+ choices: ['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']
+ type: str
+ iops:
+ description:
+      - The provisioned IOPS you want to associate with this volume.
+ type: int
+ encrypted:
+ description:
+ - Enable encryption at rest for this volume.
+ default: false
+ type: bool
+ kms_key_id:
+ description:
+ - Specify the ID of the KMS key to use.
+ type: str
+ device_name:
+ description:
+ - Device ID to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
+ type: str
+ delete_on_termination:
+ description:
+ - When set to C(true), the volume will be deleted upon instance termination.
+ type: bool
+ default: false
+ zone:
+ description:
+      - Zone in which to create the volume. If unset, uses the zone of the instance (if set).
+ aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
+ type: str
+ snapshot:
+ description:
+ - Snapshot ID on which to base the volume.
+ type: str
+ state:
+ description:
+ - Whether to ensure the volume is present or absent.
+ - I(state=list) was deprecated in release 1.1.0 and is no longer available
+ with release 4.0.0.
+ - The C(list) functionality has been moved to a dedicated module M(amazon.aws.ec2_vol_info).
+ default: present
+ choices: ['absent', 'present']
+ type: str
+ modify_volume:
+ description:
+ - The volume won't be modified unless this key is C(true).
+ type: bool
+ default: false
+ version_added: 1.4.0
+ throughput:
+ description:
+ - Volume throughput in MB/s.
+ - This parameter is only valid for gp3 volumes.
+ - Valid range is from 125 to 1000.
+ type: int
+ version_added: 1.4.0
+ multi_attach:
+ description:
+ - If set to C(true), Multi-Attach will be enabled when creating the volume.
+ - When you create a new volume, Multi-Attach is disabled by default.
+ - This parameter is supported with io1 and io2 volumes only.
+ type: bool
+ version_added: 2.0.0
+ outpost_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the Outpost.
+      - If set, the volume is created in the specified Outpost.
+ type: str
+ version_added: 3.1.0
+author:
+ - "Lester Wade (@lwade)"
+notes:
+ - Support for I(purge_tags) was added in release 1.5.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Simple attachment action
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 5
+ device_name: sdd
+ region: us-west-2
+
+# Example using custom iops params
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 5
+ iops: 100
+ device_name: sdd
+ region: us-west-2
+
+# Example using snapshot id
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ snapshot: "{{ snapshot }}"
+
+# Playbook example combined with instance launch
+- amazon.aws.ec2:
+ keypair: "{{ keypair }}"
+ image: "{{ image }}"
+ wait: true
+ count: 3
+ register: ec2
+- amazon.aws.ec2_vol:
+ instance: "{{ item.id }}"
+ volume_size: 5
+ loop: "{{ ec2.instances }}"
+ register: ec2_vol
+
+# Example: Launch an instance and then add a volume if not already attached
+# * Volume will be created with the given name if not already created.
+# * Nothing will happen if the volume is already attached.
+
+- amazon.aws.ec2:
+ keypair: "{{ keypair }}"
+ image: "{{ image }}"
+ zone: YYYYYY
+ id: my_instance
+ wait: true
+ count: 1
+ register: ec2
+
+- amazon.aws.ec2_vol:
+ instance: "{{ item.id }}"
+ name: my_existing_volume_Name_tag
+ device_name: /dev/xvdf
+ loop: "{{ ec2.instances }}"
+ register: ec2_vol
+
+# Remove a volume
+- amazon.aws.ec2_vol:
+ id: vol-XXXXXXXX
+ state: absent
+
+# Detach a volume (since 1.9)
+- amazon.aws.ec2_vol:
+ id: vol-XXXXXXXX
+ instance: None
+ region: us-west-2
+
+# Create new volume using SSD storage
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 50
+ volume_type: gp2
+ device_name: /dev/xvdf
+
+# Create new volume with multi-attach enabled
+- amazon.aws.ec2_vol:
+ zone: XXXXXX
+ multi_attach: true
+ volume_size: 4
+ volume_type: io1
+ iops: 102
+
+# Attach an existing volume to instance. The volume will be deleted upon instance termination.
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ id: XXXXXX
+ device_name: /dev/sdf
+ delete_on_termination: true
+'''
+
+RETURN = '''
+device:
+ description: device name of attached volume
+ returned: when success
+ type: str
+ sample: "/def/sdf"
+volume_id:
+ description: the id of volume
+ returned: when success
+ type: str
+ sample: "vol-35b333d9"
+volume_type:
+ description: the volume type
+ returned: when success
+ type: str
+ sample: "standard"
+volume:
+ description: a dictionary containing detailed attributes of the volume
+ returned: when success
+  type: dict
+ sample: {
+ "attachment_set": [{
+ "attach_time": "2015-10-23T00:22:29.000Z",
+ "deleteOnTermination": "false",
+ "device": "/dev/sdf",
+ "instance_id": "i-8356263c",
+ "status": "attached"
+ }],
+ "create_time": "2015-10-21T14:36:08.870Z",
+ "encrypted": false,
+ "id": "vol-35b333d9",
+ "iops": null,
+ "size": 1,
+ "snapshot_id": "",
+ "status": "in-use",
+ "tags": {
+ "env": "dev"
+ },
+ "type": "standard",
+ "zone": "us-east-1b"
+ }
+'''
+
+import time
+
+from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Taken care of by AnsibleAWSModule
+
+
+def get_instance(module, ec2_conn, instance_id=None):
+ instance = None
+ if not instance_id:
+ return instance
+
+ try:
+ reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id])
+ instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while getting details for instance {0}'.format(instance_id))
+
+ return instance
+
+
+def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
+ name = module.params.get('name')
+ param_id = module.params.get('id')
+ zone = module.params.get('zone')
+
+ if not vol_id:
+ vol_id = param_id
+
+ # If no name or id supplied, just try volume creation based on module parameters
+ if vol_id is None and name is None:
+ return None
+
+ find_params = dict()
+ vols = []
+
+ if vol_id:
+ find_params['VolumeIds'] = [vol_id]
+ elif name:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name})
+ elif zone:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone})
+
+    try:
+        paginator = ec2_conn.get_paginator('describe_volumes')
+        # build_full_result() aggregates every page of results, not just the first
+        vols = paginator.paginate(**find_params).build_full_result().get('Volumes', [])
+    except is_boto3_error_code('InvalidVolume.NotFound'):
+        module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params))
+
+ if not vols:
+ if fail_on_not_found and vol_id:
+ msg = "Could not find volume with id: {0}".format(vol_id)
+ if name:
+ msg += (" and name: {0}".format(name))
+ module.fail_json(msg=msg)
+ else:
+ return None
+
+ if len(vols) > 1:
+ module.fail_json(
+ msg="Found more than one volume in zone (if specified) with name: {0}".format(name),
+ found=[v['VolumeId'] for v in vols]
+ )
+ vol = camel_dict_to_snake_dict(vols[0])
+ return vol
+
+
+def get_volumes(module, ec2_conn):
+ instance = module.params.get('instance')
+
+ find_params = dict()
+ if instance:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance})
+
+ vols = []
+ try:
+ vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params)
+ vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while getting EBS volumes')
+ return vols
+
+
+def delete_volume(module, ec2_conn, volume_id=None):
+ changed = False
+ if volume_id:
+ try:
+ ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id)
+ changed = True
+ except is_boto3_error_code('InvalidVolume.NotFound'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Error while deleting volume')
+ return changed
+
+
+def update_volume(module, ec2_conn, volume):
+ changed = False
+ req_obj = {'VolumeId': volume['volume_id']}
+
+ if module.params.get('modify_volume'):
+ target_type = module.params.get('volume_type')
+ original_type = None
+ type_changed = False
+ if target_type:
+ original_type = volume['volume_type']
+ if target_type != original_type:
+ type_changed = True
+ req_obj['VolumeType'] = target_type
+
+ iops_changed = False
+ target_iops = module.params.get('iops')
+ original_iops = volume.get('iops')
+ if target_iops:
+ if target_iops != original_iops:
+ iops_changed = True
+ req_obj['Iops'] = target_iops
+ else:
+ req_obj['Iops'] = original_iops
+ else:
+            # If no IOPS value is specified and the volume type is being changed to gp3,
+            # keep the existing IOPS when it is already inside gp3's supported range
+            # (3000-16000); otherwise apply the gp3 default of 3000.
+ if type_changed and target_type == 'gp3':
+ if (
+ (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops
+ ):
+ req_obj['Iops'] = 3000
+ iops_changed = True
+
+ target_size = module.params.get('volume_size')
+ size_changed = False
+ if target_size:
+ original_size = volume['size']
+ if target_size != original_size:
+ size_changed = True
+ req_obj['Size'] = target_size
+
+ target_throughput = module.params.get('throughput')
+ throughput_changed = False
+ if target_throughput:
+ original_throughput = volume.get('throughput')
+ if target_throughput != original_throughput:
+ throughput_changed = True
+ req_obj['Throughput'] = target_throughput
+
+ target_multi_attach = module.params.get('multi_attach')
+ multi_attach_changed = False
+ if target_multi_attach is not None:
+ original_multi_attach = volume['multi_attach_enabled']
+ if target_multi_attach != original_multi_attach:
+ multi_attach_changed = True
+ req_obj['MultiAttachEnabled'] = target_multi_attach
+
+ changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed
+
+ if changed:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have updated volume if not in check mode.')
+            response = ec2_conn.modify_volume(aws_retry=True, **req_obj)
+
+ volume['size'] = response.get('VolumeModification').get('TargetSize')
+ volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType')
+ volume['iops'] = response.get('VolumeModification').get('TargetIops')
+ volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled')
+ volume['throughput'] = response.get('VolumeModification').get('TargetThroughput')
+
+ return volume, changed
+
+
+def create_volume(module, ec2_conn, zone):
+ changed = False
+ iops = module.params.get('iops')
+ encrypted = module.params.get('encrypted')
+ kms_key_id = module.params.get('kms_key_id')
+ volume_size = module.params.get('volume_size')
+ volume_type = module.params.get('volume_type')
+ snapshot = module.params.get('snapshot')
+ throughput = module.params.get('throughput')
+ multi_attach = module.params.get('multi_attach')
+ outpost_arn = module.params.get('outpost_arn')
+ tags = module.params.get('tags') or {}
+ name = module.params.get('name')
+
+ volume = get_volume(module, ec2_conn)
+
+    if volume is None:
+
+        if module.check_mode:
+            module.exit_json(changed=True, msg='Would have created a volume if not in check mode.')
+
+ try:
+ changed = True
+ additional_params = dict()
+
+ if volume_size:
+ additional_params['Size'] = int(volume_size)
+
+ if kms_key_id:
+ additional_params['KmsKeyId'] = kms_key_id
+
+ if snapshot:
+ additional_params['SnapshotId'] = snapshot
+
+ if iops:
+ additional_params['Iops'] = int(iops)
+
+            # Use the gp3 default (3000) when no IOPS value has been specified for volume_type=gp3
+ if volume_type == 'gp3' and not iops:
+ additional_params['Iops'] = 3000
+
+ if throughput:
+ additional_params['Throughput'] = int(throughput)
+
+ if multi_attach:
+ additional_params['MultiAttachEnabled'] = True
+
+ if outpost_arn:
+ if is_outpost_arn(outpost_arn):
+ additional_params['OutpostArn'] = outpost_arn
+ else:
+                    module.fail_json(msg='OutpostArn does not match the pattern specified in API specifications.')
+
+ if name:
+ tags['Name'] = name
+
+ if tags:
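+                # boto3_tag_specifications renders e.g. (illustrative) {'Name': 'data'}
+                # as [{'ResourceType': 'volume', 'Tags': [{'Key': 'Name', 'Value': 'data'}]}]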
+ additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume'])
+
+ create_vol_response = ec2_conn.create_volume(
+ aws_retry=True,
+ AvailabilityZone=zone,
+ Encrypted=encrypted,
+ VolumeType=volume_type,
+ **additional_params
+ )
+
+ waiter = ec2_conn.get_waiter('volume_available')
+ waiter.wait(
+ VolumeIds=[create_vol_response['VolumeId']],
+ )
+ volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while creating EBS volume')
+
+ return volume, changed
+
+
+def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name):
+ changed = False
+
+ # If device_name isn't set, make a choice based on best practices here:
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
+
+ # In future this needs to be more dynamic but combining block device mapping best practices
+ # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
+
+ attachment_data = get_attachment_data(volume_dict, wanted_state='attached')
+ if attachment_data:
+ if module.check_mode:
+ if attachment_data[0].get('status') in ['attached', 'attaching']:
+ module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format(
+ attachment_data[0].get('instance_id', None)))
+ if not volume_dict['multi_attach_enabled']:
+ # volumes without MultiAttach Enabled can be attached to 1 instance only
+ if attachment_data[0].get('instance_id', None) != instance_dict['instance_id']:
+ module.fail_json(msg="Volume {0} is already attached to another instance: {1}."
+ .format(volume_dict['volume_id'], attachment_data[0].get('instance_id', None)))
+ else:
+ return volume_dict, changed
+
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have attached volume if not in check mode.')
+ attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name,
+ InstanceId=instance_dict['instance_id'],
+ VolumeId=volume_dict['volume_id'])
+
+ waiter = ec2_conn.get_waiter('volume_in_use')
+ waiter.wait(VolumeIds=[attach_response['VolumeId']])
+ changed = True
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while attaching EBS volume')
+
+ modify_dot_attribute(module, ec2_conn, instance_dict, device_name)
+
+ volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id'])
+
+ return volume, changed
+
+
+def modify_dot_attribute(module, ec2_conn, instance_dict, device_name):
+ """ Modify delete_on_termination attribute """
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ changed = False
+
+ # volume_in_use can return *shortly* before it appears on the instance
+ # description
+ mapped_block_device = None
+ _attempt = 0
+ while mapped_block_device is None:
+ _attempt += 1
+ instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id'])
+ mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name)
+ if mapped_block_device is None:
+ if _attempt > 2:
+ module.fail_json(msg='Unable to find device on instance',
+ device=device_name, instance=instance_dict)
+ time.sleep(1)
+
+ if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'):
+ try:
+ ec2_conn.modify_instance_attribute(
+ aws_retry=True,
+ InstanceId=instance_dict['instance_id'],
+ BlockDeviceMappings=[{
+ "DeviceName": device_name,
+ "Ebs": {
+ "DeleteOnTermination": delete_on_termination
+ }
+ }]
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e,
+ msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id']))
+
+ return changed
+
+
+def get_attachment_data(volume_dict, wanted_state=None):
+ attachment_data = []
+ if not volume_dict:
+ return attachment_data
+ resource = volume_dict.get('attachments', [])
+ if wanted_state:
+ # filter 'state', return attachment matching wanted state
+ resource = [data for data in resource if data['state'] == wanted_state]
+
+ for data in resource:
+ attachment_data.append({
+ 'attach_time': data.get('attach_time', None),
+ 'device': data.get('device', None),
+ 'instance_id': data.get('instance_id', None),
+ 'status': data.get('state', None),
+ 'delete_on_termination': data.get('delete_on_termination', None)
+ })
+
+ return attachment_data
+
+
+def detach_volume(module, ec2_conn, volume_dict):
+ changed = False
+
+ attachment_data = get_attachment_data(volume_dict, wanted_state='attached')
+ # The ID of the instance must be specified if you are detaching a Multi-Attach enabled volume.
+ for attachment in attachment_data:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have detached volume if not in check mode.')
+ ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume_dict['volume_id'])
+ waiter = ec2_conn.get_waiter('volume_available')
+ waiter.wait(
+ VolumeIds=[volume_dict['volume_id']],
+ )
+ changed = True
+
+ volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id'])
+ return volume_dict, changed
+
+
+def get_volume_info(module, volume, tags=None):
+ if not tags:
+ tags = boto3_tag_list_to_ansible_dict(volume.get('tags'))
+ attachment_data = get_attachment_data(volume)
+ volume_info = {
+ 'create_time': volume.get('create_time'),
+ 'encrypted': volume.get('encrypted'),
+ 'id': volume.get('volume_id'),
+ 'iops': volume.get('iops'),
+ 'size': volume.get('size'),
+ 'snapshot_id': volume.get('snapshot_id'),
+ 'status': volume.get('state'),
+ 'type': volume.get('volume_type'),
+ 'zone': volume.get('availability_zone'),
+ 'attachment_set': attachment_data,
+ 'multi_attach_enabled': volume.get('multi_attach_enabled'),
+ 'tags': tags
+ }
+
+ volume_info['throughput'] = volume.get('throughput')
+
+ return volume_info
+
+
+def get_mapped_block_device(instance_dict=None, device_name=None):
+ mapped_block_device = None
+ if not instance_dict:
+ return mapped_block_device
+ if not device_name:
+ return mapped_block_device
+
+ for device in instance_dict.get('block_device_mappings', []):
+ if device['device_name'] == device_name:
+ mapped_block_device = device
+ break
+
+ return mapped_block_device
+
+
+def ensure_tags(module, connection, res_id, res_type, tags, purge_tags):
+ if module.check_mode:
+ return {}, True
+ changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ['InvalidVolume.NotFound'])
+ final_tags = describe_ec2_tags(connection, module, res_id, res_type)
+
+ return final_tags, changed
+
+
+def main():
+ argument_spec = dict(
+ instance=dict(),
+ id=dict(),
+ name=dict(),
+ volume_size=dict(type='int'),
+ volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']),
+ iops=dict(type='int'),
+ encrypted=dict(default=False, type='bool'),
+ kms_key_id=dict(),
+ device_name=dict(),
+ delete_on_termination=dict(default=False, type='bool'),
+ zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
+ snapshot=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ modify_volume=dict(default=False, type='bool'),
+ throughput=dict(type='int'),
+ outpost_arn=dict(type='str'),
+ purge_tags=dict(type='bool', default=True),
+ multi_attach=dict(type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['volume_type', 'io1', ['iops']],
+ ['volume_type', 'io2', ['iops']],
+ ],
+ supports_check_mode=True,
+ )
+
+ param_id = module.params.get('id')
+ name = module.params.get('name')
+ instance = module.params.get('instance')
+ volume_size = module.params.get('volume_size')
+ device_name = module.params.get('device_name')
+ zone = module.params.get('zone')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ iops = module.params.get('iops')
+ volume_type = module.params.get('volume_type')
+ throughput = module.params.get('throughput')
+ multi_attach = module.params.get('multi_attach')
+
+ # Ensure we have the zone or can get the zone
+ if instance is None and zone is None and state == 'present':
+ module.fail_json(msg="You must specify either instance or zone")
+
+ # Set volume detach flag
+ if instance == 'None' or instance == '':
+ instance = None
+ detach_vol_flag = True
+ else:
+ detach_vol_flag = False
+
+ if iops:
+ if volume_type in ('gp2', 'st1', 'sc1', 'standard'):
+ module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.')
+
+ if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000):
+ module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.')
+
+ if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000):
+ module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.')
+
+ if throughput:
+ if volume_type != 'gp3':
+            module.fail_json(msg='Throughput is only supported for gp3 volumes.')
+ if throughput < 125 or throughput > 1000:
+ module.fail_json(msg='Throughput values must be between 125 and 1000.')
+
+ if multi_attach is True and volume_type not in ('io1', 'io2'):
+ module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.')
+
+ # Set changed flag
+ changed = False
+
+ ec2_conn = module.client('ec2', AWSRetry.jittered_backoff())
+
+ # Here we need to get the zone info for the instance. This covers situation where
+ # instance is specified but zone isn't.
+ # Useful for playbooks chaining instance launch with volume create + attach and where the
+ # zone doesn't matter to the user.
+ inst = None
+
+ # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
+ # without needing to pass an unused volume_size
+ if not volume_size and not (param_id or name or snapshot):
+ module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
+
+ # Try getting volume
+ volume = get_volume(module, ec2_conn, fail_on_not_found=False)
+ if state == 'present':
+ if instance:
+ inst = get_instance(module, ec2_conn, instance_id=instance)
+ zone = inst['placement']['availability_zone']
+
+ # Use platform attribute to guess whether the instance is Windows or Linux
+ if device_name is None:
+ if inst.get('platform', '') == 'Windows':
+ device_name = '/dev/xvdf'
+ else:
+ device_name = '/dev/sdf'
+
+ # Check if there is a volume already mounted there.
+ mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name)
+ if mapped_device:
+ other_volume_mapped = False
+
+ if volume:
+ if volume['volume_id'] != mapped_device['ebs']['volume_id']:
+ other_volume_mapped = True
+ else:
+ # No volume found so this is another volume
+ other_volume_mapped = True
+
+ if other_volume_mapped:
+ module.exit_json(
+ msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance),
+ volume_id=mapped_device['ebs']['volume_id'],
+ found_volume=volume,
+ device=device_name,
+ changed=False
+ )
+
+ final_tags = None
+ tags_changed = False
+
+ if volume:
+ volume, changed = update_volume(module, ec2_conn, volume)
+        if name:
+            # tags may be None when only a Name tag is requested
+            tags = tags or {}
+            tags['Name'] = name
+ final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, module.params.get('purge_tags'))
+ else:
+ volume, changed = create_volume(module, ec2_conn, zone=zone)
+
+ if detach_vol_flag:
+ volume, attach_changed = detach_volume(module, ec2_conn, volume_dict=volume)
+ elif inst is not None:
+ volume, attach_changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name)
+ else:
+ attach_changed = False
+
+ # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
+ volume_info = get_volume_info(module, volume, tags=final_tags)
+
+ if tags_changed or attach_changed:
+ changed = True
+
+ module.exit_json(changed=changed, volume=volume_info, device=device_name,
+ volume_id=volume_info['id'], volume_type=volume_info['type'])
+ elif state == 'absent':
+ if not name and not param_id:
+            module.fail_json(msg='A volume name or id is required for deletion')
+ if volume:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.')
+ detach_volume(module, ec2_conn, volume_dict=volume)
+ changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
new file mode 100644
index 00000000..2db6b2e6
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_info
+version_added: 1.0.0
+short_description: Gather information about EC2 volumes in AWS
+description:
+ - Gather information about EC2 volumes in AWS.
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ type: dict
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all volumes
+- amazon.aws.ec2_vol_info:
+
+# Gather information about a particular volume using volume ID
+- amazon.aws.ec2_vol_info:
+ filters:
+ volume-id: vol-00112233
+
+# Gather information about any volume with a tag key Name and value Example
+- amazon.aws.ec2_vol_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any volume that is attached
+- amazon.aws.ec2_vol_info:
+ filters:
+ attachment.status: attached
+
+# Gather information about all volumes related to an EC2 Instance
+# register information to `volumes` variable
+# Replaces functionality of `amazon.aws.ec2_vol` - `state: list`
+- name: get volume(s) info from EC2 Instance
+ amazon.aws.ec2_vol_info:
+ filters:
+ attachment.instance-id: "i-000111222333"
+ register: volumes
+
+'''
+
+RETURN = '''
+volumes:
+ description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ attachment_set:
+ description:
+ - Information about the volume attachments.
+ - This was changed in version 2.0.0 from a dictionary to a list of dictionaries.
+ type: list
+ elements: dict
+ sample: [{
+ "attach_time": "2015-10-23T00:22:29.000Z",
+ "deleteOnTermination": "false",
+ "device": "/dev/sdf",
+ "instance_id": "i-8356263c",
+ "status": "attached"
+ }]
+ create_time:
+ description: The time stamp when volume creation was initiated.
+ type: str
+ sample: "2015-10-21T14:36:08.870Z"
+ encrypted:
+ description: Indicates whether the volume is encrypted.
+ type: bool
+ sample: False
+ id:
+ description: The ID of the volume.
+ type: str
+ sample: "vol-35b333d9"
+ iops:
+ description: The number of I/O operations per second (IOPS) that the volume supports.
+ type: int
+ sample: null
+ size:
+ description: The size of the volume, in GiBs.
+ type: int
+ sample: 1
+ snapshot_id:
+ description: The snapshot from which the volume was created, if applicable.
+ type: str
+ sample: ""
+ status:
+ description: The volume state.
+ type: str
+ sample: "in-use"
+ tags:
+ description: Any tags assigned to the volume.
+ type: dict
+ sample: {
+ env: "dev"
+ }
+ type:
+      description: The volume type. This can be standard, gp2, gp3, io1, io2, st1 or sc1.
+ type: str
+ sample: "standard"
+ zone:
+ description: The Availability Zone of the volume.
+ type: str
+ sample: "us-east-1b"
+ throughput:
+ description: The throughput that the volume supports, in MiB/s.
+ type: int
+ sample: 131
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_volume_info(volume, region):
+
+ attachment_data = []
+ for data in volume["attachments"]:
+ attachment_data.append({
+ 'attach_time': data.get('attach_time', None),
+ 'device': data.get('device', None),
+ 'instance_id': data.get('instance_id', None),
+ 'status': data.get('state', None),
+ 'delete_on_termination': data.get('delete_on_termination', None)
+ })
+
+ volume_info = {
+ 'create_time': volume["create_time"],
+ 'id': volume["volume_id"],
+ 'encrypted': volume["encrypted"],
+ 'iops': volume["iops"] if "iops" in volume else None,
+ 'size': volume["size"],
+ 'snapshot_id': volume["snapshot_id"],
+ 'status': volume["state"],
+ 'type': volume["volume_type"],
+ 'zone': volume["availability_zone"],
+ 'region': region,
+ 'attachment_set': attachment_data,
+ 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None
+ }
+
+ if 'throughput' in volume:
+ volume_info['throughput'] = volume["throughput"]
+
+ return volume_info
+
+
+@AWSRetry.jittered_backoff()
+def describe_volumes_with_backoff(connection, filters):
+ paginator = connection.get_paginator('describe_volumes')
+ return paginator.paginate(Filters=filters).build_full_result()
+
+
+def list_ec2_volumes(connection, module):
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
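+    # e.g. {"attachment.instance_id": "i-0011"} becomes {"attachment.instance-id": "i-0011"},
+    # while keys like "tag:Name" are passed through untouched.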
+ sanitized_filters = module.params.get("filters")
+ for key in list(sanitized_filters):
+ if not key.startswith("tag:"):
+ sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
+ volume_dict_array = []
+
+ try:
+ all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe volumes.")
+
+ for volume in all_volumes["Volumes"]:
+ volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
+ volume_dict_array.append(get_volume_info(volume, module.region))
+ module.exit_json(volumes=volume_dict_array)
+
+
+def main():
+ argument_spec = dict(filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ list_ec2_volumes(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py
new file mode 100644
index 00000000..edfdf7be
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option
+version_added: 1.0.0
+short_description: Manages DHCP option sets, and can ensure the DHCP options for the given VPC match what is requested
+description:
+  - This module creates or removes DHCP option sets, and can associate them with a VPC.
+ - Optionally, a new DHCP Options set can be created that converges a VPC's existing
+ DHCP option set with values provided.
+  - When dhcp_options_id is provided, the module will
+    1. remove it (with state='absent'),
+    2. ensure tags are applied (if state='present' and tags are provided), or
+    3. attach it to a VPC (if state='present' and a vpc_id is provided).
+  - If any of the optional values are missing, they will be treated
+    as a no-op (i.e., inherit what already exists for the VPC).
+  - To remove existing options while inheriting, supply an empty value
+    (e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
+author:
+ - "Joel Thompson (@joelthompson)"
+options:
+ domain_name:
+ description:
+ - The domain name to set in the DHCP option sets.
+ type: str
+ dns_servers:
+ description:
+ - A list of IP addresses to set the DNS servers for the VPC to.
+ type: list
+ elements: str
+ ntp_servers:
+ description:
+ - List of hosts to advertise as NTP servers for the VPC.
+ type: list
+ elements: str
+ netbios_name_servers:
+ description:
+ - List of hosts to advertise as NetBIOS servers.
+ type: list
+ elements: str
+ netbios_node_type:
+ description:
+      - NetBIOS node type to advertise in the DHCP options.
+        The AWS recommendation is to use 2 (when using netbios name services).
+        See U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html).
+ type: int
+ vpc_id:
+ description:
+ - VPC ID to associate with the requested DHCP option set.
+ - If no VPC ID is provided, and no matching option set is found then a new
+ DHCP option set is created.
+ type: str
+ delete_old:
+ description:
+ - Whether to delete the old VPC DHCP option set when associating a new one.
+ - This is primarily useful for debugging/development purposes when you
+ want to quickly roll back to the old option set. Note that this setting
+ will be ignored, and the old DHCP option set will be preserved, if it
+ is in use by any other VPC. (Otherwise, AWS will return an error.)
+ type: bool
+ default: true
+ inherit_existing:
+ description:
+ - For any DHCP options not specified in these parameters, whether to
+ inherit them from the options set already applied to I(vpc_id), or to
+ reset them to be empty.
+ type: bool
+ default: false
+ dhcp_options_id:
+ description:
+      - The resource_id of an existing DHCP options set.
+        If this is specified, then it will override other settings, except tags
+        (which will be updated to match).
+ type: str
+ state:
+ description:
+      - Create/assign or remove the DHCP options.
+        If I(state=absent), then a DHCP options set matched either
+        by ID, or by tags and options, will be removed if possible.
+ default: present
+ choices: [ 'absent', 'present' ]
+ type: str
+notes:
+ - Support for I(purge_tags) was added in release 2.0.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+RETURN = """
+changed:
+  description: Whether the DHCP options were changed.
+ type: bool
+ returned: always
+dhcp_options:
+ description: The DHCP options created, associated or found
+ returned: when available
+ type: dict
+ contains:
+ dhcp_configurations:
+ description: The DHCP configuration for the option set
+ type: list
+ sample:
+        - '{"key": "ntp-servers", "values": [{"value": "10.0.0.2"}, {"value": "10.0.1.2"}]}'
+        - '{"key": "netbios-name-servers", "values": [{"value": "10.0.0.1"}, {"value": "10.0.1.1"}]}'
+ dhcp_options_id:
+      description: The AWS resource ID of the primary DHCP options set created or found.
+ type: str
+ sample: "dopt-0955331de6a20dd07"
+ owner_id:
+ description: The ID of the AWS account that owns the DHCP options set.
+ type: str
+ sample: 012345678912
+    tags:
+      description: The tags applied to the DHCP options set.
+      type: dict
+      sample:
+        CreatedBy: ansible-test
+        Collection: amazon.aws
+dhcp_options_id:
+  description: The AWS resource ID of the primary DHCP options set created, found or removed.
+ type: str
+ returned: when available
+dhcp_config:
+ description: The boto2-style DHCP options created, associated or found
+ returned: when available
+ type: dict
+ contains:
+ domain-name-servers:
+ description: The IP addresses of up to four domain name servers, or AmazonProvidedDNS.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ domain-name:
+ description: The domain name for hosts in the DHCP option sets
+ returned: when available
+ type: list
+ sample:
+ - "my.example.com"
+ ntp-servers:
+ description: The IP addresses of up to four Network Time Protocol (NTP) servers.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-name-servers:
+ description: The IP addresses of up to four NetBIOS name servers.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type:
+ description: The NetBIOS node type (1, 2, 4, or 8).
+ returned: when available
+ type: str
+ sample: 2
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- amazon.aws.ec2_vpc_dhcp_option:
+ domain_name: "foo.example.com"
+ region: us-east-1
+ dns_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ vpc_id: vpc-123456
+ delete_old: True
+ inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+    - "{{ groups['dns-primary'] }}"
+    - "{{ groups['dns-secondary'] }}"
+ vpc_id: vpc-123456
+ inherit_existing: True
+ delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+ state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dhcp_options_id: dopt-12345678
+ vpc_id: vpc-123456
+
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def fetch_dhcp_options_for_vpc(client, module, vpc_id):
+ try:
+ vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])['Vpcs']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe vpc {0}".format(vpc_id))
+
+ if len(vpcs) != 1:
+ return None
+ try:
+ dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]['DhcpOptionsId']])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe dhcp option {0}".format(vpcs[0]['DhcpOptionsId']))
+
+ if len(dhcp_options['DhcpOptions']) != 1:
+ return None
+ return dhcp_options['DhcpOptions'][0]['DhcpConfigurations'], dhcp_options['DhcpOptions'][0]['DhcpOptionsId']
+
+
+def remove_dhcp_options_by_id(client, module, dhcp_options_id):
+ changed = False
+ # First, check if this dhcp option is associated to any other vpcs
+ try:
+ associations = client.describe_vpcs(aws_retry=True, Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_options_id]}])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe VPC associations for dhcp option id {0}".format(dhcp_options_id))
+ if len(associations['Vpcs']) > 0:
+ return changed
+
+ changed = True
+ if not module.check_mode:
+ try:
+ client.delete_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id)
+ except is_boto3_error_code('InvalidDhcpOptionsID.NotFound'):
+ return False
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete dhcp option {0}".format(dhcp_options_id))
+
+ return changed
+
+
+def match_dhcp_options(client, module, new_config):
+ """
+    Returns (True, DhcpOptionsId) if the module parameters match an existing
+    option set, otherwise (False, None).
+    Filters by tags as well, if any are specified.
+ """
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe dhcp options")
+
+ for dopts in all_dhcp_options['DhcpOptions']:
+ if module.params['tags']:
+ # If we were given tags, try to match on them
+ boto_tags = ansible_dict_to_boto3_tag_list(module.params['tags'])
+ if dopts['DhcpConfigurations'] == new_config and dopts['Tags'] == boto_tags:
+ return True, dopts['DhcpOptionsId']
+ elif dopts['DhcpConfigurations'] == new_config:
+ return True, dopts['DhcpOptionsId']
+
+ return False, None
+
+
+def create_dhcp_config(module):
+ """
+ Convert provided parameters into a DhcpConfigurations list that conforms to what the API returns:
+ https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
+ [{'Key': 'domain-name',
+ 'Values': [{'Value': 'us-west-2.compute.internal'}]},
+ {'Key': 'domain-name-servers',
+ 'Values': [{'Value': 'AmazonProvidedDNS'}]},
+ ...],
+ """
+ new_config = []
+ params = module.params
+ if params['domain_name'] is not None:
+ new_config.append({'Key': 'domain-name', 'Values': [{'Value': params['domain_name']}]})
+ if params['dns_servers'] is not None:
+ dns_server_list = []
+ for server in params['dns_servers']:
+ dns_server_list.append({'Value': server})
+ new_config.append({'Key': 'domain-name-servers', 'Values': dns_server_list})
+ if params['ntp_servers'] is not None:
+ ntp_server_list = []
+ for server in params['ntp_servers']:
+ ntp_server_list.append({'Value': server})
+ new_config.append({'Key': 'ntp-servers', 'Values': ntp_server_list})
+ if params['netbios_name_servers'] is not None:
+ netbios_server_list = []
+ for server in params['netbios_name_servers']:
+ netbios_server_list.append({'Value': server})
+ new_config.append({'Key': 'netbios-name-servers', 'Values': netbios_server_list})
+ if params['netbios_node_type'] is not None:
+ new_config.append({'Key': 'netbios-node-type', 'Values': params['netbios_node_type']})
+
+ return new_config
+
+
+def create_dhcp_option_set(client, module, new_config):
+ """
+ A CreateDhcpOptions object looks different than the object we create in create_dhcp_config()
+ This is the only place we use it, so create it now
+ https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateDhcpOptions.html
+ We have to do this after inheriting any existing_config, so we need to start with the object
+ that we made in create_dhcp_config().
+ normalize_config() gives us the nicest format to work with for this.
+ """
+ changed = True
+ desired_config = normalize_ec2_vpc_dhcp_config(new_config)
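+    # normalize_ec2_vpc_dhcp_config() flattens the describe-style [{'Value': ...}] lists into
+    # plain values, e.g. {'domain-name': ['example.com'], 'ntp-servers': ['10.0.0.2', '10.0.1.2'],
+    # 'netbios-node-type': '2'}, which matches the 'Values' shape CreateDhcpOptions accepts.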
+ create_config = []
+ tags_list = []
+
+ for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']:
+ if desired_config.get(option):
+ create_config.append({'Key': option, 'Values': desired_config[option]})
+ if desired_config.get('netbios-node-type'):
+ # We need to listify this one
+ create_config.append({'Key': 'netbios-node-type', 'Values': [desired_config['netbios-node-type']]})
+
+ if module.params.get('tags'):
+ tags_list = boto3_tag_specifications(module.params['tags'], ['dhcp-options'])
+
+ try:
+ if not module.check_mode:
+ dhcp_options = client.create_dhcp_options(aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list)
+ return changed, dhcp_options['DhcpOptions']['DhcpOptionsId']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create dhcp option set")
+
+ return changed, None
+
+
+def find_opt_index(config, option):
+    return next((i for i, item in enumerate(config) if item["Key"] == option), None)
+
+
+def inherit_dhcp_config(existing_config, new_config):
+ """
+ Compare two DhcpConfigurations lists and apply existing options to unset parameters
+
+ If there's an existing option config and the new option is not set or it's none,
+ inherit the existing config.
+ The configs are unordered lists of dicts with non-unique keys, so we have to find
+ the right list index for a given config option first.
+ """
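+    # For example, if existing_config contains a {'Key': 'domain-name', ...} entry and
+    # new_config has no 'domain-name' entry, the existing entry is appended to new_config.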
+ changed = False
+ for option in ['domain-name', 'domain-name-servers', 'ntp-servers',
+ 'netbios-name-servers', 'netbios-node-type']:
+ existing_index = find_opt_index(existing_config, option)
+ new_index = find_opt_index(new_config, option)
+ # `if existing_index` evaluates to False on index 0, so be very specific and verbose
+ if existing_index is not None and new_index is None:
+ new_config.append(existing_config[existing_index])
+ changed = True
+
+ return changed, new_config
+
+
+def get_dhcp_options_info(client, module, dhcp_options_id):
+ # Return boto3-style details, consistent with the _info module
+
+ if module.check_mode and dhcp_options_id is None:
+ # We can't describe without an option id, we might get here when creating a new option set in check_mode
+ return None
+
+ try:
+ dhcp_option_info = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe dhcp options")
+
+ dhcp_options_set = dhcp_option_info['DhcpOptions'][0]
+ dhcp_option_info = {'DhcpOptionsId': dhcp_options_set['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_options_set['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_options_set.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=['Tags'])
+
+
+def associate_options(client, module, vpc_id, dhcp_options_id):
+ try:
+ if not module.check_mode:
+ client.associate_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id, VpcId=vpc_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to associate dhcp option {0} to VPC {1}".format(dhcp_options_id, vpc_id))
+
+
+def main():
+ argument_spec = dict(
+ dhcp_options_id=dict(type='str', default=None),
+ domain_name=dict(type='str', default=None),
+ dns_servers=dict(type='list', elements='str', default=None),
+ ntp_servers=dict(type='list', elements='str', default=None),
+ netbios_name_servers=dict(type='list', elements='str', default=None),
+ netbios_node_type=dict(type='int', default=None),
+ vpc_id=dict(type='str', default=None),
+ delete_old=dict(type='bool', default=True),
+ inherit_existing=dict(type='bool', default=False),
+ tags=dict(type='dict', default=None, aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ check_boto3=False,
+ supports_check_mode=True
+ )
+
+ vpc_id = module.params['vpc_id']
+ delete_old = module.params['delete_old']
+ inherit_existing = module.params['inherit_existing']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+ state = module.params['state']
+ dhcp_options_id = module.params['dhcp_options_id']
+
+ found = False
+ changed = False
+ new_config = create_dhcp_config(module)
+ existing_config = None
+ existing_id = None
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    module.deprecate("The 'new_options' return key is deprecated and will be replaced by 'dhcp_config'. Both values are returned for now.",
+ date='2022-12-01', collection_name='amazon.aws')
+ if state == 'absent':
+ if not dhcp_options_id:
+ # Look up the option id first by matching the supplied options
+            found, dhcp_options_id = match_dhcp_options(client, module, new_config)
+            if not dhcp_options_id:
+                # Nothing matched the supplied options, so there is nothing to remove
+                module.exit_json(changed=False, new_options={}, dhcp_options={})
+ changed = remove_dhcp_options_by_id(client, module, dhcp_options_id)
+ module.exit_json(changed=changed, new_options={}, dhcp_options={})
+
+ if not dhcp_options_id:
+ # If we were given a vpc_id then we need to look at the configuration on that
+ if vpc_id:
+ existing_config, existing_id = fetch_dhcp_options_for_vpc(client, module, vpc_id)
+ # if we've been asked to inherit existing options, do that now
+ if inherit_existing and existing_config:
+ changed, new_config = inherit_dhcp_config(existing_config, new_config)
+ # Do the vpc's dhcp options already match what we're asked for? if so we are done
+ if existing_config:
+ if new_config == existing_config:
+ dhcp_options_id = existing_id
+ if tags or purge_tags:
+ changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options',
+ tags=tags, purge_tags=purge_tags)
+ return_config = normalize_ec2_vpc_dhcp_config(new_config)
+ results = get_dhcp_options_info(client, module, dhcp_options_id)
+ module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results)
+ # If no vpc_id was given, or the options don't match then look for an existing set using tags
+ found, dhcp_options_id = match_dhcp_options(client, module, new_config)
+
+ else:
+ # Now let's cover the case where there are existing options that we were told about by id
+ # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
+ try:
+ # Preserve the boto2 module's behaviour of checking if the option set exists first,
+ # and return the same error message if it does not
+ client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id])
+ # If that didn't fail, then we know the option ID exists
+ found = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="a dhcp_options_id was supplied, but does not exist")
+
+ if not found:
+ # If we still don't have an options ID, create it
+ changed, dhcp_options_id = create_dhcp_option_set(client, module, new_config)
+ else:
+ if tags or purge_tags:
+ changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options',
+ tags=tags, purge_tags=purge_tags)
+
+ # If we were given a vpc_id, then attach the options we now have to that before we finish
+ if vpc_id:
+ associate_options(client, module, vpc_id, dhcp_options_id)
+        changed = True
+
+ if delete_old and existing_id:
+ remove_dhcp_options_by_id(client, module, existing_id)
+
+ return_config = normalize_ec2_vpc_dhcp_config(new_config)
+ results = get_dhcp_options_info(client, module, dhcp_options_id)
+ module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
new file mode 100644
index 00000000..19369763
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about DHCP options sets in AWS
+description:
+ - Gather information about DHCP options sets in AWS.
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ type: dict
+ dhcp_options_ids:
+ description:
+ - Get details of specific DHCP option IDs.
+ type: list
+ elements: str
+ dry_run:
+ description:
+ - Checks whether you have the required permissions to view the DHCP
+ options.
+ type: bool
+ default: false
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all DHCP Option sets for an account or profile
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by DhcpOptionId
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+    dhcp_options_ids:
+      - dopt-123fece2
+ register: dhcp_info
+
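+# The module returns both boto3-style (dhcp_options) and boto2-style (dhcp_config)
+# views of each option set; for example (illustrative):
+- name: Show the normalized config of the first DHCP option set found
+  ansible.builtin.debug:
+    msg: "{{ dhcp_info.dhcp_config[0] }}"
+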
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The DHCP options created, associated or found.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ dhcp_configurations:
+ description: The DHCP configuration for the option set.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The name of a DHCP option.
+ returned: always
+ type: str
+ values:
+ description: List of values for the DHCP option.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The attribute value. This value is case-sensitive.
+ returned: always
+ type: str
+ sample:
+        - '{"key": "ntp-servers", "values": [{"value": "10.0.0.2"}, {"value": "10.0.1.2"}]}'
+        - '{"key": "netbios-name-servers", "values": [{"value": "10.0.0.1"}, {"value": "10.0.1.1"}]}'
+ dhcp_options_id:
+ description: The aws resource id of the primary DHCP options set created or found.
+ type: str
+ sample: "dopt-0955331de6a20dd07"
+ owner_id:
+ description: The ID of the AWS account that owns the DHCP options set.
+ type: str
+ sample: 012345678912
+    tags:
+      description: The tags applied to the DHCP options set.
+      type: dict
+      sample:
+        CreatedBy: ansible-test
+        Collection: amazon.aws
+dhcp_config:
+ description: The boto2-style DHCP options created, associated or found. Provided for consistency with ec2_vpc_dhcp_option's C(dhcp_config).
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ domain-name-servers:
+ description: The IP addresses of up to four domain name servers, or AmazonProvidedDNS.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ domain-name:
+ description: The domain name for hosts in the DHCP option sets.
+ returned: when available
+ type: list
+ sample:
+ - "my.example.com"
+ ntp-servers:
+ description: The IP addresses of up to four Network Time Protocol (NTP) servers.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-name-servers:
+ description: The IP addresses of up to four NetBIOS name servers.
+ returned: when available
+ type: list
+ sample:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type:
+ description: The NetBIOS node type (1, 2, 4, or 8).
+ returned: when available
+ type: str
+ sample: 2
+changed:
+  description: Whether anything was changed; always C(false), as this module only gathers information.
+ type: bool
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config
+
+
+def get_dhcp_options_info(dhcp_option):
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+ if module.params.get("dry_run"):
+ params['DryRun'] = True
+
+ if module.params.get("dhcp_options_ids"):
+ params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ normalized_config = [normalize_ec2_vpc_dhcp_config(config['DhcpConfigurations']) for config in all_dhcp_options['DhcpOptions']]
+ raw_config = [camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=['Tags']) for option in all_dhcp_options['DhcpOptions']]
+ return raw_config, normalized_config
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default={}),
+ dry_run=dict(type='bool', default=False),
+ dhcp_options_ids=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    # Gather both the raw (boto3-style) and normalized (boto2-style) results
+ results, normalized_config = list_dhcp_options(client, module)
+
+ module.exit_json(dhcp_options=results, dhcp_config=normalized_config)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py
new file mode 100644
index 00000000..080610eb
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint
+short_description: Create and delete AWS VPC endpoints
+version_added: 1.0.0
+description:
+ - Creates AWS VPC endpoints.
+ - Deletes AWS VPC endpoints.
+ - This module supports check mode.
+options:
+ vpc_id:
+ description:
+ - Required when creating a VPC endpoint.
+ required: false
+ type: str
+ vpc_endpoint_type:
+ description:
+ - The type of endpoint.
+ required: false
+ default: Gateway
+ choices: [ "Interface", "Gateway", "GatewayLoadBalancer" ]
+ type: str
+ version_added: 1.5.0
+ vpc_endpoint_subnets:
+ description:
+ - The list of subnets to attach to the endpoint.
+ - Requires I(vpc_endpoint_type=GatewayLoadBalancer) or I(vpc_endpoint_type=Interface).
+ required: false
+ type: list
+ elements: str
+ version_added: 2.1.0
+ vpc_endpoint_security_groups:
+ description:
+ - The list of security groups to attach to the endpoint.
+ - Requires I(vpc_endpoint_type=GatewayLoadBalancer) or I(vpc_endpoint_type=Interface).
+ required: false
+ type: list
+ elements: str
+ version_added: 2.1.0
+ service:
+ description:
+      - An AWS supported VPC endpoint service. Use the M(amazon.aws.ec2_vpc_endpoint_service_info)
+        module to describe the supported endpoint services.
+ - Required when creating an endpoint.
+ required: false
+ type: str
+ policy:
+ description:
+ - A properly formatted JSON policy as string, see
+ U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813).
+ Cannot be used with I(policy_file).
+      - Optional when creating an endpoint. If not provided, AWS will
+        utilise a default policy which provides full access to the service.
+ required: false
+ type: json
+ policy_file:
+ description:
+ - The path to the properly json formatted policy file, see
+ U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813)
+ on how to use it properly. Cannot be used with I(policy).
+      - Optional when creating an endpoint. If not provided, AWS will
+        utilise a default policy which provides full access to the service.
+      - This option has been deprecated and will be removed after 2022-12-01.
+        To maintain the existing functionality, please use the I(policy) option
+        and a file lookup.
+ required: false
+ aliases: [ "policy_path" ]
+ type: path
+ state:
+ description:
+ - C(present) to ensure resource is created.
+ - C(absent) to remove resource.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ wait:
+ description:
+ - When specified, will wait for status to reach C(available) for I(state=present).
+ - Unfortunately this is ignored for delete actions due to a difference in
+ behaviour from AWS.
+ required: false
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - Used in conjunction with I(wait).
+ - Number of seconds to wait for status.
+ - Unfortunately this is ignored for delete actions due to a difference in
+ behaviour from AWS.
+ required: false
+ default: 320
+ type: int
+ route_table_ids:
+ description:
+ - List of one or more route table IDs to attach to the endpoint.
+ - A route is added to the route table with the destination of the
+ endpoint if provided.
+ - Route table IDs are only valid for C(Gateway) endpoints.
+ required: false
+ type: list
+ elements: str
+ vpc_endpoint_id:
+ description:
+ - One or more VPC endpoint IDs to remove from the AWS account.
+ - Required if I(state=absent).
+ required: false
+ type: str
+ client_token:
+ description:
+ - Optional client token to ensure idempotency.
+ required: false
+ type: str
+author:
+ - Karen Cheng (@Etherdaemon)
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 1.5.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new vpc endpoint with a json template for policy
+ amazon.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} "
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
+
+- name: Create new vpc endpoint with the default policy
+ amazon.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
+
+- name: Create new vpc endpoint with json file
+ amazon.aws.ec2_vpc_endpoint:
+ state: present
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ service: com.amazonaws.ap-southeast-2.s3
+ policy_file: "{{ role_path }}/files/endpoint_policy.json"
+ route_table_ids:
+ - rtb-12345678
+ - rtb-87654321
+ register: new_vpc_endpoint
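+
+# Since policy_file is deprecated, the same result can be achieved with a file
+# lookup; a sketch (the file path is illustrative):
+- name: Create new vpc endpoint with a file lookup for the policy
+  amazon.aws.ec2_vpc_endpoint:
+    state: present
+    region: ap-southeast-2
+    vpc_id: vpc-12345678
+    service: com.amazonaws.ap-southeast-2.s3
+    policy: "{{ lookup('file', role_path + '/files/endpoint_policy.json') }}"
+    route_table_ids:
+      - rtb-12345678
+  register: new_vpc_endpoint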
+
+- name: Delete newly created vpc endpoint
+ amazon.aws.ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}"
+ region: ap-southeast-2
+'''
+
+RETURN = r'''
+result:
+  description:
+    - The result of the create or delete action; the module returns it under the C(result) key.
+    - When an endpoint is created, this is a dict describing the resulting endpoint.
+  returned: success
+  type: dict
+  sample:
+    {
+ "creation_timestamp": "2017-02-20T05:04:15+00:00",
+ "policy_document": {
+ "Id": "Policy1450910922815",
+ "Statement": [
+ {
+ "Action": "s3:*",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Resource": [
+ "arn:aws:s3:::*/*",
+ "arn:aws:s3:::*"
+ ],
+ "Sid": "Stmt1450910920641"
+ }
+ ],
+ "Version": "2012-10-17"
+ },
+ "route_table_ids": [
+ "rtb-abcd1234"
+ ],
+ "service_name": "com.amazonaws.ap-southeast-2.s3",
+ "vpc_endpoint_id": "vpce-a1b2c3d4",
+ "vpc_id": "vpc-abbad0d0"
+ }
+'''
+
+import datetime
+import json
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+def get_endpoints(client, module, endpoint_id=None):
+ params = dict()
+ if endpoint_id:
+ params['VpcEndpointIds'] = [endpoint_id]
+ else:
+ filters = list()
+ if module.params.get('service'):
+ filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]})
+ if module.params.get('vpc_id'):
+ filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]})
+ params['Filters'] = filters
+ try:
+ result = client.describe_vpc_endpoints(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get endpoints")
+
+ # normalize iso datetime fields in result
+ normalized_result = normalize_boto3_result(result)
+ return normalized_result
+
+
+def match_endpoints(route_table_ids, service_name, vpc_id, endpoint):
+ found = False
+ sorted_route_table_ids = []
+
+ if route_table_ids:
+ sorted_route_table_ids = sorted(route_table_ids)
+
+ if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name:
+ sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds'])
+ if sorted_endpoint_rt_ids == sorted_route_table_ids:
+ found = True
+ return found
+
+
+def setup_creation(client, module):
+ endpoint_id = module.params.get('vpc_endpoint_id')
+ route_table_ids = module.params.get('route_table_ids')
+ service_name = module.params.get('service')
+ vpc_id = module.params.get('vpc_id')
+ changed = False
+
+ if not endpoint_id:
+ # Try to use the module parameters to match any existing endpoints
+ all_endpoints = get_endpoints(client, module, endpoint_id)
+ if len(all_endpoints['VpcEndpoints']) > 0:
+ for endpoint in all_endpoints['VpcEndpoints']:
+ if match_endpoints(route_table_ids, service_name, vpc_id, endpoint):
+ endpoint_id = endpoint['VpcEndpointId']
+ break
+
+ if endpoint_id:
+ # If we have an endpoint now, just ensure tags and exit
+ if module.params.get('tags'):
+ changed |= ensure_ec2_tags(client, module, endpoint_id,
+ resource_type='vpc-endpoint',
+ tags=module.params.get('tags'),
+ purge_tags=module.params.get('purge_tags'))
+ normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0]
+ return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags'])
+
+ changed, result = create_vpc_endpoint(client, module)
+
+ return changed, camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+
+
+def create_vpc_endpoint(client, module):
+ params = dict()
+ changed = False
+ token_provided = False
+ params['VpcId'] = module.params.get('vpc_id')
+ params['VpcEndpointType'] = module.params.get('vpc_endpoint_type')
+ params['ServiceName'] = module.params.get('service')
+
+ if module.params.get('vpc_endpoint_type') != 'Gateway' and module.params.get('route_table_ids'):
+ module.fail_json(msg="Route table IDs are only supported for Gateway type VPC Endpoint.")
+
+ if module.check_mode:
+ changed = True
+ result = 'Would have created VPC Endpoint if not in check mode'
+ module.exit_json(changed=changed, result=result)
+
+ if module.params.get('route_table_ids'):
+ params['RouteTableIds'] = module.params.get('route_table_ids')
+
+ if module.params.get('vpc_endpoint_subnets'):
+ params['SubnetIds'] = module.params.get('vpc_endpoint_subnets')
+
+ if module.params.get('vpc_endpoint_security_groups'):
+ params['SecurityGroupIds'] = module.params.get('vpc_endpoint_security_groups')
+
+ if module.params.get('client_token'):
+ token_provided = True
+ request_time = datetime.datetime.utcnow()
+ params['ClientToken'] = module.params.get('client_token')
+
+ policy = None
+ if module.params.get('policy'):
+ try:
+ policy = json.loads(module.params.get('policy'))
+ except ValueError as e:
+            # ValueError from json.loads has no 'response' attribute to unpack
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ elif module.params.get('policy_file'):
+ try:
+ with open(module.params.get('policy_file'), 'r') as json_data:
+ policy = json.load(json_data)
+ except (OSError, json.JSONDecodeError) as e:
+            # These plain Python exceptions carry no AWS response object
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ if policy:
+ params['PolicyDocument'] = json.dumps(policy)
+
+ if module.params.get('tags'):
+ params["TagSpecifications"] = boto3_tag_specifications(module.params.get('tags'), ['vpc-endpoint'])
+
+ try:
+ changed = True
+ result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint']
+        # The raw boto3 response still uses CamelCase keys at this point
+        if token_provided and (request_time > result['CreationTimestamp'].replace(tzinfo=None)):
+ changed = False
+ elif module.params.get('wait') and not module.check_mode:
+ try:
+ waiter = get_waiter(client, 'vpc_endpoint_exists')
+ waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15))
+ except botocore.exceptions.WaiterError as e:
+                    module.fail_json_aws(e, msg='Error waiting for vpc endpoint to become available - please check the AWS console')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failure while waiting for status')
+
+ except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except
+ module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API")
+ except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except
+ module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to create VPC endpoint.")
+
+ # describe and normalize iso datetime fields in result after adding tags
+ normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0]
+ return changed, normalized_result
+
+
+def setup_removal(client, module):
+ params = dict()
+ changed = False
+
+ if module.check_mode:
+ try:
+ exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')])
+ if exists:
+ result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'}
+ changed = True
+ except is_boto3_error_code('InvalidVpcEndpointId.NotFound'):
+ result = {'msg': 'Endpoint does not exist, nothing to delete.'}
+ changed = False
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get endpoints")
+
+ return changed, result
+
+ if isinstance(module.params.get('vpc_endpoint_id'), string_types):
+ params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')]
+ else:
+ params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id')
+ try:
+ result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful']
+ if len(result) < len(params['VpcEndpointIds']):
+ changed = True
+        # For some reason delete_vpc_endpoints doesn't throw exceptions;
+        # it returns a list of failed 'results' instead. Throw these so we
+        # can catch them the way we expect.
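+        # Each entry in 'Unsuccessful' looks roughly like
+        # {'Error': {'Code': 'InvalidVpcEndpoint.NotFound', 'Message': '...'}, 'ResourceId': 'vpce-...'},
+        # which is the shape ClientError expects as its error_response argument.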
+ for r in result:
+ try:
+ raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints')
+ except is_boto3_error_code('InvalidVpcEndpoint.NotFound'):
+ continue
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Failed to delete VPC endpoint")
+ return changed, result
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(),
+ vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']),
+ vpc_endpoint_security_groups=dict(type='list', elements='str'),
+ vpc_endpoint_subnets=dict(type='list', elements='str'),
+ service=dict(),
+ policy=dict(type='json'),
+ policy_file=dict(type='path', aliases=['policy_path']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=320, required=False),
+ route_table_ids=dict(type='list', elements='str'),
+ vpc_endpoint_id=dict(),
+ client_token=dict(no_log=False),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['policy', 'policy_file']],
+ required_if=[
+ ['state', 'present', ['vpc_id', 'service']],
+ ['state', 'absent', ['vpc_endpoint_id']],
+ ],
+ )
+
+ # Validate Requirements
+ state = module.params.get('state')
+
+ if module.params.get('policy_file'):
+ module.deprecate('The policy_file option has been deprecated and'
+ ' will be removed after 2022-12-01',
+ date='2022-12-01', collection_name='amazon.aws')
+
+ if module.params.get('vpc_endpoint_type'):
+ if module.params.get('vpc_endpoint_type') == 'Gateway':
+ if module.params.get('vpc_endpoint_subnets') or module.params.get('vpc_endpoint_security_groups'):
+ module.fail_json(msg="Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway endpoint type")
+
+ if module.params.get('vpc_endpoint_type') == 'GatewayLoadBalancer':
+ if module.params.get('vpc_endpoint_security_groups'):
+ module.fail_json(msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type")
+
+ if module.params.get('vpc_endpoint_type') == 'Interface':
+ if module.params.get('vpc_endpoint_subnets') and not module.params.get('vpc_endpoint_security_groups'):
+ module.fail_json(msg="Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and vpc_endpoint_subnets is defined")
+ if not module.params.get('vpc_endpoint_subnets') and module.params.get('vpc_endpoint_security_groups'):
+ module.fail_json(msg="Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and vpc_endpoint_security_groups is defined")
+
+ try:
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Ensure resource is present
+ if state == 'present':
+ (changed, results) = setup_creation(ec2, module)
+ else:
+ (changed, results) = setup_removal(ec2, module)
+
+ module.exit_json(changed=changed, result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py
new file mode 100644
index 00000000..cc33f28d
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint_info
+short_description: Retrieves AWS VPC endpoints details using AWS methods
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC endpoints.
+options:
+ query:
+ description:
+ - Defaults to C(endpoints).
+ - Specifies the query action to take.
+ - I(query=endpoints) returns information about AWS VPC endpoints.
+ - Retrieving information about services using I(query=services) has been
+ deprecated in favour of the M(amazon.aws.ec2_vpc_endpoint_service_info) module.
+ - The I(query) option has been deprecated and will be removed after 2022-12-01.
+ required: False
+ choices:
+ - services
+ - endpoints
+ type: str
+ vpc_endpoint_ids:
+ description:
+ - The IDs of specific endpoints to retrieve the details of.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+ amazon.aws.ec2_vpc_endpoint_info:
+ query: services
+ region: ap-southeast-2
+ register: supported_endpoint_services
+
+- name: Get all endpoints in ap-southeast-2 region
+ amazon.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ register: existing_endpoints
+
+- name: Get all endpoints with specific filters
+ amazon.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ filters:
+ vpc-id:
+ - vpc-12345678
+ - vpc-87654321
+ vpc-endpoint-state:
+ - available
+ - pending
+ register: existing_endpoints
+
+- name: Get details on specific endpoint
+ amazon.aws.ec2_vpc_endpoint_info:
+ query: endpoints
+ region: ap-southeast-2
+ vpc_endpoint_ids:
+ - vpce-12345678
+ register: endpoint_details
+'''
+
+RETURN = r'''
+service_names:
+ description: AWS VPC endpoint service names.
+ returned: I(query) is C(services)
+ type: list
+ elements: str
+ sample:
+    - com.amazonaws.ap-southeast-2.s3
+vpc_endpoints:
+ description:
+ - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
+ policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
+ returned: I(query) is C(endpoints)
+ type: list
+ elements: dict
+ contains:
+ creation_timestamp:
+ description: The date and time that the endpoint was created.
+ returned: always
+ type: str
+ dns_entries:
+      description: List of DNS entries for the endpoint.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ dns_name:
+ description: The DNS name.
+ returned: always
+ type: str
+ hosted_zone_id:
+ description: The ID of the private hosted zone.
+ returned: always
+ type: str
+ groups:
+ description: List of security groups associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ network_interface_ids:
+ description: List of network interfaces for the endpoint.
+ returned: always
+ type: list
+ elements: str
+ owner_id:
+ description: The ID of the AWS account that owns the endpoint.
+ returned: always
+ type: str
+ policy_document:
+ description: The policy document associated with the endpoint.
+ returned: always
+ type: str
+ private_dns_enabled:
+ description: Indicates whether the VPC is associated with a private hosted zone.
+ returned: always
+ type: bool
+ requester_managed:
+        description: Indicates whether the endpoint is being managed by its service.
+ returned: always
+ type: bool
+ route_table_ids:
+ description: List of route table IDs associated with the endpoint.
+ returned: always
+ type: list
+ elements: str
+ service_name:
+ description: The name of the service to which the endpoint is associated.
+ returned: always
+ type: str
+ state:
+ description: The state of the endpoint.
+ returned: always
+ type: str
+      subnet_ids:
+        description: List of subnets associated with the endpoint.
+        returned: always
+        type: list
+        elements: str
+ tags:
+ description: List of tags associated with the endpoint.
+ returned: always
+ type: list
+ elements: dict
+ vpc_endpoint_id:
+ description: The ID of the endpoint.
+ returned: always
+ type: str
+ vpc_endpoint_type:
+ description: The type of endpoint.
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC.
+ returned: always
+ type: str
+ sample:
+ - creation_timestamp: "2017-02-16T11:06:48+00:00"
+ policy_document: >
+ "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
+ \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
+ \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
+ route_table_ids:
+ - rtb-abcd1234
+ service_name: "com.amazonaws.ap-southeast-2.s3"
+ state: "available"
+ vpc_endpoint_id: "vpce-abbad0d0"
+ vpc_id: "vpc-1111ffff"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+@AWSRetry.jittered_backoff()
+def _describe_endpoints(client, **params):
+ paginator = client.get_paginator('describe_vpc_endpoints')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.jittered_backoff()
+def _describe_endpoint_services(client, **params):
+ paginator = client.get_paginator('describe_vpc_endpoint_services')
+ return paginator.paginate(**params).build_full_result()
+
+
+def get_supported_services(client, module):
+ try:
+ services = _describe_endpoint_services(client)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to get endpoint services")
+
+ results = list(services['ServiceNames'])
+ return dict(service_names=results)
+
+
+def get_endpoints(client, module):
+ results = list()
+ params = dict()
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ if module.params.get('vpc_endpoint_ids'):
+ params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
+ try:
+ results = _describe_endpoints(client, **params)['VpcEndpoints']
+ results = normalize_boto3_result(results)
+ except is_boto3_error_code('InvalidVpcEndpointId.NotFound'):
+ module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get endpoints")
+
+ return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
+
+
+def main():
+ argument_spec = dict(
+ query=dict(choices=['services', 'endpoints'], required=False),
+ filters=dict(default={}, type='dict'),
+ vpc_endpoint_ids=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ query = module.params.get('query')
+ if query == 'endpoints':
+ module.deprecate('The query option has been deprecated and'
+ ' will be removed after 2022-12-01. Searching for'
+ ' `endpoints` is now the default and after'
+ ' 2022-12-01 this module will only support fetching'
+ ' endpoints.',
+ date='2022-12-01', collection_name='amazon.aws')
+ elif query == 'services':
+ module.deprecate('Support for fetching service information with this '
+ 'module has been deprecated and will be removed after'
+ ' 2022-12-01. '
+ 'Please use the ec2_vpc_endpoint_service_info module '
+ 'instead.', date='2022-12-01',
+ collection_name='amazon.aws')
+ else:
+ query = 'endpoints'
+
+ invocations = {
+ 'services': get_supported_services,
+ 'endpoints': get_endpoints,
+ }
+ results = invocations[query](connection, module)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py
new file mode 100644
index 00000000..0417fe39
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint_service_info
+short_description: Retrieves AWS VPC endpoint service details
+version_added: 1.5.0
+description:
+ - Gets details related to AWS VPC Endpoint Services.
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpointServices.html)
+ for possible filters.
+ type: dict
+ service_names:
+ description:
+ - A list of service names which can be used to narrow the search results.
+ type: list
+ elements: str
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+ amazon.aws.ec2_vpc_endpoint_service_info:
+ region: ap-southeast-2
+ register: supported_endpoint_services
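+
+# Hypothetical example (service name assumed): narrow the results to one service
+- name: Get details for the S3 endpoint service
+  amazon.aws.ec2_vpc_endpoint_service_info:
+    region: ap-southeast-2
+    service_names:
+      - com.amazonaws.ap-southeast-2.s3
+  register: s3_endpoint_service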
+'''
+
+RETURN = r'''
+service_names:
+ description: List of supported AWS VPC endpoint service names.
+ returned: success
+ type: list
+ sample:
+ service_names:
+ - com.amazonaws.ap-southeast-2.s3
+service_details:
+ description: Detailed information about the AWS VPC endpoint services.
+ returned: success
+ type: complex
+ contains:
+ service_name:
+ returned: success
+ description: The name of the endpoint service.
+ type: str
+ service_id:
+ returned: success
+ description: The ID of the endpoint service.
+ type: str
+ service_type:
+ returned: success
+ description: The type of the service.
+ type: list
+ availability_zones:
+ returned: success
+ description: The Availability Zones in which the service is available.
+ type: list
+ owner:
+ returned: success
+ description: The AWS account ID of the service owner.
+ type: str
+ base_endpoint_dns_names:
+ returned: success
+ description: The DNS names for the service.
+ type: list
+ private_dns_name:
+ returned: success
+ description: The private DNS name for the service.
+ type: str
+ private_dns_names:
+ returned: success
+ description: The private DNS names assigned to the VPC endpoint service.
+ type: list
+ vpc_endpoint_policy_supported:
+ returned: success
+ description: Whether the service supports endpoint policies.
+ type: bool
+ acceptance_required:
+ returned: success
+ description:
+ Whether VPC endpoint connection requests to the service must be
+ accepted by the service owner.
+ type: bool
+ manages_vpc_endpoints:
+ returned: success
+ description: Whether the service manages its VPC endpoints.
+ type: bool
+ tags:
+ returned: success
+ description: A dict of tags associated with the service.
+ type: dict
+ private_dns_name_verification_state:
+ returned: success
+ description:
+ - The verification state of the VPC endpoint service.
+ - Consumers of an endpoint service cannot use the private name when the state is not C(verified).
+ type: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+# We're using a paginator so we can't use the client decorators
+@AWSRetry.jittered_backoff()
+def get_services(client, module):
+ paginator = client.get_paginator('describe_vpc_endpoint_services')
+ params = {}
+ if module.params.get("filters"):
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ if module.params.get("service_names"):
+ params['ServiceNames'] = module.params.get("service_names")
+
+ results = paginator.paginate(**params).build_full_result()
+ return results
+
+
+def normalize_service(service):
+ normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags'])
+ normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags'))
+ return normalized
+
+
+def normalize_result(result):
+ normalized = {}
+ normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')]
+ normalized['service_names'] = result.get('ServiceNames', [])
+ return normalized
+
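+# Illustrative sketch (hypothetical values): normalize_service() snake-cases
+# the boto3 response but flattens the raw 'Tags' list into a plain dict.
+#
+#   normalize_service({'ServiceName': 'com.amazonaws.ap-southeast-2.s3',
+#                      'Tags': [{'Key': 'team', 'Value': 'infra'}]})
+#   # -> {'service_name': 'com.amazonaws.ap-southeast-2.s3',
+#   #     'tags': {'team': 'infra'}}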
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict'),
+ service_names=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # Validate Requirements
+ try:
+ client = module.client('ec2')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ try:
+ results = get_services(client, module)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve service details')
+ normalized_result = normalize_result(results)
+
+ module.exit_json(changed=False, **normalized_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py
new file mode 100644
index 00000000..99106b03
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+version_added: 1.0.0
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+author: Robert Estelle (@erydo)
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or terminate the IGW
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+notes:
+- Support for I(purge_tags) was added in release 1.3.0.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.tags
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{ igw.gateway_id }} for use in setting up NATs etc.
+- name: Create Internet gateway
+ amazon.aws.ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ register: igw
+
+- name: Create Internet gateway with tags
+ amazon.aws.ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+ tags:
+ Tag1: tag1
+ Tag2: tag2
+ register: igw
+
+- name: Delete Internet gateway
+ amazon.aws.ec2_vpc_igw:
+ state: absent
+ vpc_id: vpc-abcdefgh
+ register: vpc_igw_delete
+'''
+
+RETURN = '''
+changed:
+ description: If any changes have been made to the Internet Gateway.
+ type: bool
+ returned: always
+ sample:
+ changed: false
+gateway_id:
+ description: The unique identifier for the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ gateway_id: "igw-XXXXXXXX"
+tags:
+ description: The tags associated with the Internet Gateway.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The VPC ID associated with the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpc_id: "vpc-XXXXXXXX"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def describe_igws_with_backoff(connection, **params):
+ paginator = connection.get_paginator('describe_internet_gateways')
+ return paginator.paginate(**params).build_full_result()['InternetGateways']
+
+
+class AnsibleEc2Igw():
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client(
+ 'ec2', retry_decorator=AWSRetry.jittered_backoff()
+ )
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ vpc_id = self._module.params.get('vpc_id')
+ state = self._module.params.get('state', 'present')
+ tags = self._module.params.get('tags')
+ purge_tags = self._module.params.get('purge_tags')
+
+ if state == 'present':
+ self.ensure_igw_present(vpc_id, tags, purge_tags)
+ elif state == 'absent':
+ self.ensure_igw_absent(vpc_id)
+
+ def get_matching_igw(self, vpc_id, gateway_id=None):
+ '''
+ Returns the internet gateway found.
+ Parameters:
+ vpc_id (str): VPC ID
+ gateway_id (str): Internet Gateway ID, if specified
+ Returns:
+ igw (dict): dict of igw found, None if none found
+ '''
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ try:
+ # If we know the gateway_id, use it to avoid bugs with using filters
+ # See https://github.com/ansible-collections/amazon.aws/pull/766
+ if not gateway_id:
+ igws = describe_igws_with_backoff(self._connection, Filters=filters)
+ else:
+ igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ igw = None
+ if len(igws) > 1:
+ self._module.fail_json(
+ msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'
+ .format(vpc_id))
+ elif igws:
+ igw = camel_dict_to_snake_dict(igws[0])
+
+ return igw
+
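+ # Illustrative sketch (hypothetical values): by the time get_igw_info()
+ # is called the IGW dict has been snake-cased, so its tag list uses
+ # lowercase 'key'/'value' entries.
+ #
+ #   get_igw_info({'internet_gateway_id': 'igw-2123634d',
+ #                 'tags': [{'key': 'Name', 'value': 'demo'}]},
+ #                'vpc-1111ffff')
+ #   # -> {'gateway_id': 'igw-2123634d', 'tags': {'Name': 'demo'},
+ #   #     'vpc_id': 'vpc-1111ffff'}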
+ @staticmethod
+ def get_igw_info(igw, vpc_id):
+ return {
+ 'gateway_id': igw['internet_gateway_id'],
+ 'tags': boto3_tag_list_to_ansible_dict(igw['tags']),
+ 'vpc_id': vpc_id
+ }
+
+ def ensure_igw_absent(self, vpc_id):
+ igw = self.get_matching_igw(vpc_id)
+ if igw is None:
+ return self._results
+
+ if self._check_mode:
+ self._results['changed'] = True
+ return self._results
+
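+ # AWS requires an attached IGW to be detached from its VPC before it
+ # can be deleted, hence the two-step call below.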
+ try:
+ self._results['changed'] = True
+ self._connection.detach_internet_gateway(
+ aws_retry=True,
+ InternetGatewayId=igw['internet_gateway_id'],
+ VpcId=vpc_id
+ )
+ self._connection.delete_internet_gateway(
+ aws_retry=True,
+ InternetGatewayId=igw['internet_gateway_id']
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")
+
+ return self._results
+
+ def ensure_igw_present(self, vpc_id, tags, purge_tags):
+ igw = self.get_matching_igw(vpc_id)
+
+ if igw is None:
+ if self._check_mode:
+ self._results['changed'] = True
+ self._results['gateway_id'] = None
+ return self._results
+
+ try:
+ response = self._connection.create_internet_gateway(aws_retry=True)
+
+ # Ensure the gateway exists before trying to attach it or add tags
+ waiter = get_waiter(self._connection, 'internet_gateway_exists')
+ waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']])
+
+ igw = camel_dict_to_snake_dict(response['InternetGateway'])
+ self._connection.attach_internet_gateway(
+ aws_retry=True,
+ InternetGatewayId=igw['internet_gateway_id'],
+ VpcId=vpc_id
+ )
+
+ # Ensure the gateway is attached before proceeding
+ waiter = get_waiter(self._connection, 'internet_gateway_attached')
+ waiter.wait(InternetGatewayIds=[igw['internet_gateway_id']])
+ self._results['changed'] = True
+ except botocore.exceptions.WaiterError as e:
+ self._module.fail_json_aws(e, msg="No Internet Gateway exists.")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')
+
+ # Modify tags
+ self._results['changed'] |= ensure_ec2_tags(
+ self._connection, self._module, igw['internet_gateway_id'],
+ resource_type='internet-gateway', tags=tags, purge_tags=purge_tags,
+ retry_codes='InvalidInternetGatewayID.NotFound'
+ )
+
+ # Update igw
+ igw = self.get_matching_igw(vpc_id, gateway_id=igw['internet_gateway_id'])
+ igw_info = self.get_igw_info(igw, vpc_id)
+ self._results.update(igw_info)
+
+ return self._results
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ results = dict(
+ changed=False
+ )
+ igw_manager = AnsibleEc2Igw(module=module, results=results)
+ igw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py
new file mode 100644
index 00000000..b3e34faf
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_igw_info
+version_added: 1.0.0
+short_description: Gather information about internet gateways in AWS
+description:
+ - Gather information about internet gateways in AWS.
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
+ type: dict
+ internet_gateway_ids:
+ description:
+ - The IDs of the Internet Gateways to describe. Provide this value as a list.
+ type: list
+ elements: str
+ convert_tags:
+ description:
+ - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format.
+ - Prior to release 4.0.0 this defaulted to C(False).
+ default: True
+ type: bool
+ version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all Internet Gateways for an account or profile
+ amazon.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ register: igw_info
+
+- name: Gather information about a filtered list of Internet Gateways
+ amazon.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "igw-123"
+ register: igw_info
+
+- name: Gather information about a specific internet gateway by InternetGatewayId
+ amazon.aws.ec2_vpc_igw_info:
+ region: ap-southeast-2
+ profile: production
+ internet_gateway_ids: igw-c1231234
+ register: igw_info
+'''
+
+RETURN = r'''
+changed:
+ description: True if listing the internet gateways succeeds.
+ type: bool
+ returned: always
+ sample: "false"
+internet_gateways:
+ description: The internet gateways for the account.
+ returned: always
+ type: complex
+ contains:
+ attachments:
+ description: Any VPCs attached to the internet gateway.
+ returned: I(state=present)
+ type: complex
+ contains:
+ state:
+ description: The current state of the attachment.
+ returned: I(state=present)
+ type: str
+ sample: available
+ vpc_id:
+ description: The ID of the VPC.
+ returned: I(state=present)
+ type: str
+ sample: vpc-02123b67
+ internet_gateway_id:
+ description: The ID of the internet gateway.
+ returned: I(state=present)
+ type: str
+ sample: igw-2123634d
+ tags:
+ description: Any tags assigned to the internet gateway.
+ returned: I(state=present)
+ type: dict
+ sample:
+ tags:
+ "Ansible": "Test"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_internet_gateway_info(internet_gateway, convert_tags):
+ if convert_tags:
+ tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags'])
+ ignore_list = ["Tags"]
+ else:
+ tags = internet_gateway['Tags']
+ ignore_list = []
+ internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
+ 'Attachments': internet_gateway['Attachments'],
+ 'Tags': tags}
+
+ internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list)
+ return internet_gateway_info
+
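+# Illustrative sketch (hypothetical values): the effect of convert_tags on
+# the returned structure.
+#
+#   igw = {'InternetGatewayId': 'igw-2123634d', 'Attachments': [],
+#          'Tags': [{'Key': 'Name', 'Value': 'igw-123'}]}
+#   get_internet_gateway_info(igw, convert_tags=True)
+#   # -> {'internet_gateway_id': 'igw-2123634d', 'attachments': [],
+#   #     'tags': {'Name': 'igw-123'}}
+#   get_internet_gateway_info(igw, convert_tags=False)
+#   # -> {'internet_gateway_id': 'igw-2123634d', 'attachments': [],
+#   #     'tags': [{'key': 'Name', 'value': 'igw-123'}]}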
+
+def list_internet_gateways(connection, module):
+ params = dict()
+
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ convert_tags = module.params.get('convert_tags')
+
+ if module.params.get("internet_gateway_ids"):
+ params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
+
+ try:
+ all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params)
+ except is_boto3_error_code('InvalidInternetGatewayID.NotFound'):
+ module.fail_json('InternetGateway not found')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, 'Unable to describe internet gateways')
+
+ return [get_internet_gateway_info(igw, convert_tags)
+ for igw in all_internet_gateways['InternetGateways']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default=dict()),
+ internet_gateway_ids=dict(type='list', default=None, elements='str'),
+ convert_tags=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # Validate Requirements
+ try:
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # call your function here
+ results = list_internet_gateways(connection, module)
+
+ module.exit_json(internet_gateways=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py
new file mode 100644
index 00000000..e818998f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py
@@ -0,0 +1,949 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_nat_gateway
+version_added: 1.0.0
+short_description: Manage AWS VPC NAT Gateways
+description:
+ - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
+options:
+ state:
+ description:
+ - Ensure NAT Gateway is present or absent.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ nat_gateway_id:
+ description:
+ - The ID AWS dynamically allocates to the NAT Gateway on creation.
+ This is required when I(state=absent).
+ type: str
+ subnet_id:
+ description:
+ - The ID of the subnet to create the NAT Gateway in.
+ This is required when I(state=present).
+ type: str
+ allocation_id:
+ description:
+ - The ID of the Elastic IP allocation. If neither this nor I(eip_address)
+ is passed, an EIP is allocated for this NAT Gateway.
+ type: str
+ eip_address:
+ description:
+ - The Elastic IP address of the EIP you want attached to this NAT Gateway.
+ If neither this nor I(allocation_id) is passed,
+ an EIP is allocated for this NAT Gateway.
+ type: str
+ if_exist_do_not_create:
+ description:
+ - If a NAT Gateway already exists in the I(subnet_id), do not create a new one.
+ required: false
+ default: false
+ type: bool
+ release_eip:
+ description:
+ - Deallocate the EIP from the VPC.
+ - This option is only valid with I(state=absent).
+ - You should use this with the I(wait) option, since you cannot release an address while a delete operation is in progress.
+ default: false
+ type: bool
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ default: 320
+ type: int
+ client_token:
+ description:
+ - Optional unique token to be used during create to ensure idempotency.
+ When specifying this option, ensure you also specify the I(eip_address)
+ parameter, otherwise any subsequent runs will fail.
+ type: str
+author:
+ - Allen Sanabria (@linuxdynasty)
+ - Jon Hadfield (@jonhadfield)
+ - Karen Cheng (@Etherdaemon)
+ - Alina Buzachis (@alinabuzachis)
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 1.4.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ region: ap-southeast-2
+ client_token: abcd-12345678
+ register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ wait: true
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: true
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: true
+ region: ap-southeast-2
+ if_exist_do_not_create: true
+ register: new_nat_gateway
+
+- name: Delete nat gateway using discovered nat gateways from facts module.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: absent
+ region: ap-southeast-2
+ wait: true
+ nat_gateway_id: "{{ item.NatGatewayId }}"
+ release_eip: true
+ register: delete_nat_gateway_result
+ loop: "{{ gateways_to_remove.result }}"
+
+- name: Delete nat gateway and wait for deleted status.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ wait: true
+ wait_timeout: 500
+ region: ap-southeast-2
+
+- name: Delete nat gateway and release EIP.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ release_eip: true
+ wait: true
+ wait_timeout: 300
+ region: ap-southeast-2
+
+- name: Create new nat gateway using allocation-id and tags.
+ amazon.aws.ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ tags:
+ Tag1: tag1
+ Tag2: tag2
+ register: new_nat_gateway
+
+- name: Update tags without purge
+ amazon.aws.ec2_vpc_nat_gateway:
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ purge_tags: false
+ tags:
+ Tag3: tag3
+ wait: true
+ register: update_tags_nat_gateway
+'''
+
+RETURN = r'''
+create_time:
+ description: The ISO 8601 date time format in UTC.
+ returned: In all cases.
+ type: str
+ sample: "2016-03-05T05:19:20.282000+00:00'"
+nat_gateway_id:
+ description: The ID of the VPC NAT Gateway.
+ returned: In all cases.
+ type: str
+ sample: "nat-0d1e3a878585988f8"
+subnet_id:
+ description: The ID of the subnet.
+ returned: In all cases.
+ type: str
+ sample: "subnet-12345"
+state:
+ description: The current state of the NAT Gateway.
+ returned: In all cases.
+ type: str
+ sample: "available"
+tags:
+ description: The tags associated with the VPC NAT Gateway.
+ type: dict
+ returned: When tags are present.
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The ID of the VPC.
+ returned: In all cases.
+ type: str
+ sample: "vpc-12345"
+nat_gateway_addresses:
+ description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
+ returned: In all cases.
+ type: list
+ sample: [
+ {
+ 'public_ip': '52.52.52.52',
+ 'network_interface_id': 'eni-12345',
+ 'private_ip': '10.0.0.100',
+ 'allocation_id': 'eipalloc-12345'
+ }
+ ]
+'''
+
+import datetime
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_nat_gateways(client, **params):
+ try:
+ paginator = client.get_paginator('describe_nat_gateways')
+ return paginator.paginate(**params).build_full_result()['NatGateways']
+ except is_boto3_error_code('InvalidNatGatewayID.NotFound'):
+ return None
+
+
+def wait_for_status(client, module, waiter_name, nat_gateway_id):
+ wait_timeout = module.params.get('wait_timeout')
+ try:
+ waiter = get_waiter(client, waiter_name)
+ attempts = 1 + int(wait_timeout / waiter.config.delay)
+ waiter.wait(
+ NatGatewayIds=[nat_gateway_id],
+ WaiterConfig={'MaxAttempts': attempts}
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for NAT gateway state to update.")
+
+
+def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states=None):
+ """Retrieve a list of NAT Gateways
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+
+ Kwargs:
+ subnet_id (str): The subnet_id the nat resides in.
+ nat_gateway_id (str): The Amazon NAT id.
+ states (list): States available (pending, failed, available, deleting, and deleted)
+ default=None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> subnet_id = 'subnet-12345678'
+ >>> get_nat_gateways(client, module, subnet_id)
+ [
+ {
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "nat_gateway_id": "nat-123456789",
+ "state": "deleted",
+ "subnet_id": "subnet-123456789",
+ "tags": {},
+ "vpc_id": "vpc-12345678"
+ }
+ ]
+
+ Returns:
+ list
+ """
+
+ params = dict()
+ existing_gateways = list()
+
+ if not states:
+ states = ['available', 'pending']
+ if nat_gateway_id:
+ params['NatGatewayIds'] = [nat_gateway_id]
+ else:
+ params['Filter'] = [
+ {
+ 'Name': 'subnet-id',
+ 'Values': [subnet_id]
+ },
+ {
+ 'Name': 'state',
+ 'Values': states
+ }
+ ]
+
+ try:
+ gateways = _describe_nat_gateways(client, **params)
+ if gateways:
+ for gw in gateways:
+ existing_gateways.append(camel_dict_to_snake_dict(gw))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ return existing_gateways
+
+
+def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None):
+ """Retrieve all NAT Gateways for a subnet.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ subnet_id (str): The subnet_id the nat resides in.
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> gateway_in_subnet_exists(client, module, subnet_id, allocation_id)
+ (
+ [
+ {
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "nat_gateway_id": "nat-123456789",
+ "state": "deleted",
+ "subnet_id": "subnet-123456789",
+ "tags": {},
+ "vpc_id": "vpc-1234567"
+ }
+ ],
+ False
+ )
+
+ Returns:
+ Tuple (list, bool)
+ """
+
+ allocation_id_exists = False
+ gateways = []
+ states = ['available', 'pending']
+
+ gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states))
+
+ if gws_retrieved:
+ for gw in gws_retrieved:
+ for address in gw['nat_gateway_addresses']:
+ if allocation_id:
+ if address.get('allocation_id') == allocation_id:
+ allocation_id_exists = True
+ gateways.append(gw)
+ else:
+ gateways.append(gw)
+
+ return gateways, allocation_id_exists
+
+
+def get_eip_allocation_id_by_address(client, module, eip_address):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ eip_address (str): The Elastic IP Address of the EIP.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> eip_address = '52.87.29.36'
+ >>> get_eip_allocation_id_by_address(client, module, eip_address)
+ (
+ 'eipalloc-36014da3', ''
+ )
+
+ Returns:
+ Tuple (str, str)
+ """
+
+ params = {
+ 'PublicIps': [eip_address],
+ }
+ allocation_id = None
+ msg = ''
+
+ try:
+ allocations = client.describe_addresses(aws_retry=True, **params)['Addresses']
+
+ if len(allocations) == 1:
+ allocation = allocations[0]
+ else:
+ allocation = None
+
+ if allocation:
+ if allocation.get('Domain') != 'vpc':
+ msg = (
+ "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
+ .format(eip_address)
+ )
+ else:
+ allocation_id = allocation.get('AllocationId')
+
+ except is_boto3_error_code('InvalidAddress.Malformed'):
+ module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address))
+ except is_boto3_error_code('InvalidAddress.NotFound'): # pylint: disable=duplicate-except
+ msg = (
+ "EIP {0} does not exist".format(eip_address)
+ )
+ allocation_id = None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to describe EIP")
+
+ return allocation_id, msg
+
+
+def allocate_eip_address(client, module):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> allocate_eip_address(client, module)
+ (
+ True, '', ''
+ )
+
+ Returns:
+ Tuple (bool, str, str)
+ """
+
+ new_eip = None
+ msg = ''
+ params = {
+ 'Domain': 'vpc',
+ }
+
+ if module.check_mode:
+ ip_allocated = True
+ new_eip = None
+ return ip_allocated, msg, new_eip
+
+ try:
+ new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId']
+ ip_allocated = True
+ msg = 'eipalloc id {0} created'.format(new_eip)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ return ip_allocated, msg, new_eip
+
+
+def release_address(client, module, allocation_id):
+ """Release an EIP from your EIP Pool
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ allocation_id (str): The eip Amazon identifier.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> allocation_id = "eipalloc-123456"
+ >>> release_address(client, module, allocation_id)
+ (
+ True, ''
+ )
+
+ Returns:
+ Tuple (bool, str)
+ """
+
+ msg = ''
+
+ if module.check_mode:
+ return True, ''
+
+ ip_released = False
+
+ try:
+ client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id])
+ except is_boto3_error_code('InvalidAllocationID.NotFound') as e:
+ # IP address likely already released
+ # Happens with gateway in 'deleted' state that
+ # still lists associations
+ return True, e
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+ try:
+ client.release_address(aws_retry=True, AllocationId=allocation_id)
+ ip_released = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ return ip_released, msg
+
+
+def create(client, module, subnet_id, allocation_id, tags, client_token=None,
+ wait=False):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ subnet_id (str): The subnet_id the nat resides in
+ allocation_id (str): The eip Amazon identifier
+ tags (dict): Tags to associate to the NAT gateway
+
+ Kwargs:
+ wait (bool): Wait for the NAT gateway to be in the available state before returning.
+ default = False
+ client_token (str):
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> subnet_id = 'subnet-1234567'
+ >>> allocation_id = 'eipalloc-1234567'
+ >>> create(client, module, subnet_id, allocation_id, tags={}, wait=True)
+ [
+ true,
+ {
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "nat_gateway_id": "nat-123456789",
+ "state": "deleted",
+ "subnet_id": "subnet-1234567",
+ "tags": {},
+ "vpc_id": "vpc-1234567"
+ },
+ ""
+ ]
+
+ Returns:
+ Tuple (bool, dict, str)
+ """
+
+ params = {
+ 'SubnetId': subnet_id,
+ 'AllocationId': allocation_id
+ }
+ request_time = datetime.datetime.utcnow()
+ changed = False
+ token_provided = False
+ result = {}
+ msg = ''
+
+ if client_token:
+ token_provided = True
+ params['ClientToken'] = client_token
+
+ if tags:
+ params["TagSpecifications"] = boto3_tag_specifications(tags, ['natgateway'])
+
+ if module.check_mode:
+ changed = True
+ return changed, result, msg
+
+ try:
+ result = camel_dict_to_snake_dict(
+ client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]
+ )
+ changed = True
+
+ create_time = result['create_time'].replace(tzinfo=None)
+
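+ # If a client token was supplied and the gateway's creation time
+ # predates this request, AWS matched an existing gateway via the
+ # token rather than creating a new one, so report no change.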
+ if token_provided and (request_time > create_time):
+ changed = False
+
+ elif wait and result.get('state') != 'available':
+ wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id'])
+
+ # Get new result
+ result = camel_dict_to_snake_dict(
+ _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0]
+ )
+
+ except is_boto3_error_code('IdempotentParameterMismatch') as e:
+ msg = (
+ 'NAT Gateway does not support update and token has already been provided: ' + str(e)
+ )
+ changed = False
+ result = None
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+ if result:
+ result['tags'] = describe_ec2_tags(client, module, result['nat_gateway_id'],
+ resource_type='natgateway')
+
+ return changed, result, msg
+
+
+def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None,
+ if_exist_do_not_create=False, wait=False, client_token=None):
+ """Create an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ subnet_id (str): The subnet_id the nat resides in
+ tags (dict): Tags to associate to the NAT gateway
+ purge_tags (bool): If true, remove tags not listed in I(tags)
+
+ Kwargs:
+ allocation_id (str): The EIP Amazon identifier.
+ default = None
+ eip_address (str): The Elastic IP Address of the EIP.
+ default = None
+ if_exist_do_not_create (bool): if a nat gateway already exists in this
+ subnet, then do not create another one.
+ default = False
+ wait (bool): Wait for the NAT gateway to be in the available state before returning.
+ default = False
+ client_token (str):
+ default = None
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> subnet_id = 'subnet-w4t12897'
+ >>> allocation_id = 'eipalloc-36014da3'
+ >>> pre_create(client, module, subnet_id, tags={}, purge_tags=True, allocation_id=allocation_id, if_exist_do_not_create=True, wait=True)
+ [
+ true,
+ "",
+ {
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "state": "deleted",
+ "subnet_id": "subnet-w4t12897",
+ "tags": {},
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+
+ changed = False
+ msg = ''
+ results = {}
+
+ if not allocation_id and not eip_address:
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(client, module, subnet_id)
+ )
+
+ if len(existing_gateways) > 0 and if_exist_do_not_create:
+ results = existing_gateways[0]
+ changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'],
+ resource_type='natgateway', tags=tags,
+ purge_tags=purge_tags)
+
+ results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'],
+ resource_type='natgateway')
+
+ if changed:
+ return changed, msg, results
+
+ changed = False
+ msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return changed, msg, results
+ else:
+ changed, msg, allocation_id = (
+ allocate_eip_address(client, module)
+ )
+
+ if not changed:
+ return changed, msg, dict()
+
+ elif eip_address or allocation_id:
+ if eip_address and not allocation_id:
+ allocation_id, msg = (
+ get_eip_allocation_id_by_address(
+ client, module, eip_address
+ )
+ )
+ if not allocation_id:
+ changed = False
+ return changed, msg, dict()
+
+ existing_gateways, allocation_id_exists = (
+ gateway_in_subnet_exists(
+ client, module, subnet_id, allocation_id
+ )
+ )
+
+ if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
+ results = existing_gateways[0]
+ changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'],
+ resource_type='natgateway', tags=tags,
+ purge_tags=purge_tags)
+
+ results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'],
+ resource_type='natgateway')
+
+ if changed:
+ return changed, msg, results
+
+ changed = False
+ msg = (
+ 'NAT Gateway {0} already exists in subnet_id {1}'
+ .format(
+ existing_gateways[0]['nat_gateway_id'], subnet_id
+ )
+ )
+ return changed, msg, results
+
+ changed, results, msg = create(
+ client, module, subnet_id, allocation_id, tags, client_token, wait
+ )
+
+ return changed, msg, results
+
+
+def remove(client, module, nat_gateway_id, wait=False, release_eip=False):
+ """Delete an Amazon NAT Gateway.
+ Args:
+ client (botocore.client.EC2): Boto3 client
+ module: AnsibleAWSModule class instance
+ nat_gateway_id (str): The Amazon nat id
+
+ Kwargs:
+ wait (bool): Wait for the nat to be in the deleted state before returning.
+ release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
+
+ Basic Usage:
+ >>> client = boto3.client('ec2')
+ >>> module = AnsibleAWSModule(...)
+ >>> nat_gw_id = 'nat-03835afb6e31df79b'
+ >>> remove(client, module, nat_gw_id, wait=True, release_eip=True)
+ [
+ true,
+ "",
+ {
+ "create_time": "2016-03-05T00:33:21.209000+00:00",
+ "delete_time": "2016-03-05T00:36:37.329000+00:00",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "52.87.29.36",
+ "network_interface_id": "eni-5579742d",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-36014da3"
+ }
+ ],
+ "nat_gateway_id": "nat-03835afb6e31df79b",
+ "state": "deleted",
+ "subnet_id": "subnet-w4t12897",
+ "tags": {},
+ "vpc_id": "vpc-w68571b5"
+ }
+ ]
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+
+ params = {
+ 'NatGatewayId': nat_gateway_id
+ }
+ changed = False
+ results = {}
+ allocation_id = None
+ states = ['pending', 'available']
+ msg = ''
+
+ if module.check_mode:
+ changed = True
+ return changed, msg, results
+
+ try:
+ gw_list = (
+ get_nat_gateways(
+ client, module, nat_gateway_id=nat_gateway_id,
+ states=states
+ )
+ )
+
+ if len(gw_list) == 1:
+ results = gw_list[0]
+ client.delete_nat_gateway(aws_retry=True, **params)
+ allocation_id = (
+ results['nat_gateway_addresses'][0]['allocation_id']
+ )
+ changed = True
+ msg = (
+ 'NAT gateway {0} is in a deleting state. Delete was successful'
+ .format(nat_gateway_id)
+ )
+
+ if wait and results.get('state') != 'deleted':
+ wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id)
+
+ # Get new results
+ results = camel_dict_to_snake_dict(
+ _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0]
+ )
+ results['tags'] = describe_ec2_tags(client, module, nat_gateway_id,
+ resource_type='natgateway')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ # Only attempt the release if a gateway (and its allocation ID) was found above.
+ if release_eip and allocation_id:
+ eip_released, msg = (
+ release_address(client, module, allocation_id))
+ if not eip_released:
+ module.fail_json(
+ msg="Failed to release EIP {0}: {1}".format(allocation_id, msg)
+ )
+
+ return changed, msg, results
+
+
+def main():
+ argument_spec = dict(
+ subnet_id=dict(type='str'),
+ eip_address=dict(type='str'),
+ allocation_id=dict(type='str'),
+ if_exist_do_not_create=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=320, required=False),
+ release_eip=dict(type='bool', default=False),
+ nat_gateway_id=dict(type='str'),
+ client_token=dict(type='str', no_log=False),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['allocation_id', 'eip_address']
+ ],
+ required_if=[['state', 'absent', ['nat_gateway_id']],
+ ['state', 'present', ['subnet_id']]],
+ )
+
+ state = module.params.get('state').lower()
+ subnet_id = module.params.get('subnet_id')
+ allocation_id = module.params.get('allocation_id')
+ eip_address = module.params.get('eip_address')
+ nat_gateway_id = module.params.get('nat_gateway_id')
+ wait = module.params.get('wait')
+ release_eip = module.params.get('release_eip')
+ client_token = module.params.get('client_token')
+ if_exist_do_not_create = module.params.get('if_exist_do_not_create')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
+ try:
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+ changed = False
+ msg = ''
+
+ if state == 'present':
+ changed, msg, results = (
+ pre_create(
+ client, module, subnet_id, tags, purge_tags, allocation_id, eip_address,
+ if_exist_do_not_create, wait, client_token
+ )
+ )
+ else:
+ changed, msg, results = (
+ remove(
+ client, module, nat_gateway_id, wait, release_eip
+ )
+ )
+
+ module.exit_json(msg=msg, changed=changed, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
new file mode 100644
index 00000000..b3178230
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nat_gateway_info
+short_description: Retrieves AWS VPC Managed NAT Gateway details
+version_added: 1.0.0
+description:
+ - Gets various details related to AWS VPC Managed NAT Gateways.
+options:
+ nat_gateway_ids:
+ description:
+ - List of specific NAT gateway IDs to fetch details for.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
+ for possible filters.
+ type: dict
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all nat gateways
+- name: List all managed nat gateways in ap-southeast-2
+ amazon.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ register: all_ngws
+
+- name: Debugging the result
+ ansible.builtin.debug:
+ msg: "{{ all_ngws.result }}"
+
+- name: Get details on specific nat gateways
+ amazon.aws.ec2_vpc_nat_gateway_info:
+ nat_gateway_ids:
+ - nat-1234567891234567
+ - nat-7654321987654321
+ region: ap-southeast-2
+ register: specific_ngws
+
+- name: Get all nat gateways with specific filters
+ amazon.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ state: ['pending']
+ register: pending_ngws
+
+- name: Get nat gateways with specific filter
+ amazon.aws.ec2_vpc_nat_gateway_info:
+ region: ap-southeast-2
+ filters:
+ subnet-id: subnet-12345678
+ state: ['available']
+ register: existing_nat_gateways
+'''
+
+RETURN = r'''
+changed:
+ description: True if listing the NAT gateways succeeds.
+ type: bool
+ returned: always
+ sample: false
+result:
+ description:
+ - The result of the describe, converted to ansible snake case style.
+ - See also U(http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways).
+ returned: success
+ type: list
+ contains:
+ create_time:
+ description: The date and time the NAT gateway was created.
+ returned: always
+ type: str
+ sample: "2021-03-11T22:43:25+00:00"
+ delete_time:
+ description: The date and time the NAT gateway was deleted.
+ returned: when the NAT gateway has been deleted
+ type: str
+ sample: "2021-03-11T22:43:25+00:00"
+ nat_gateway_addresses:
+ description: List containing a dictionary with the IP addresses and network interface associated with the NAT gateway.
+ returned: always
+ type: list
+ contains:
+ allocation_id:
+ description: The allocation ID of the Elastic IP address that's associated with the NAT gateway.
+ returned: always
+ type: str
+ sample: eipalloc-0853e66a40803da76
+ network_interface_id:
+ description: The ID of the network interface associated with the NAT gateway.
+ returned: always
+ type: str
+ sample: eni-0a37acdbe306c661c
+ private_ip:
+ description: The private IP address associated with the Elastic IP address.
+ returned: always
+ type: str
+ sample: 10.0.238.227
+ public_ip:
+ description: The Elastic IP address associated with the NAT gateway.
+ returned: always
+ type: str
+ sample: 34.204.123.52
+ nat_gateway_id:
+ description: The ID of the NAT gateway.
+ returned: always
+ type: str
+ sample: nat-0c242a2397acf6173
+ state:
+ description: state of the NAT gateway.
+ returned: always
+ type: str
+ sample: available
+ subnet_id:
+ description: The ID of the subnet in which the NAT gateway is located.
+ returned: always
+ type: str
+ sample: subnet-098c447465d4344f9
+ vpc_id:
+ description: The ID of the VPC in which the NAT gateway is located.
+ returned: always
+ type: str
+ sample: vpc-02f37f48438ab7d4c
+ tags:
+ description: Tags applied to the NAT gateway.
+ returned: always
+ type: dict
+ sample:
+ Tag1: tag1
+ Tag_2: tag_2
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_nat_gateways(client, module, **params):
+ try:
+ paginator = client.get_paginator('describe_nat_gateways')
+ return paginator.paginate(**params).build_full_result()['NatGateways']
+ except is_boto3_error_code('InvalidNatGatewayID.NotFound'):
+ module.exit_json(msg="NAT gateway not found.")
+ except is_boto3_error_code('NatGatewayMalformed') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="NAT gateway id is malformed.")
+
+
+def get_nat_gateways(client, module):
+ params = dict()
+ nat_gateways = list()
+
+ params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+
+ try:
+ result = normalize_boto3_result(_describe_nat_gateways(client, module, **params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, 'Unable to describe NAT gateways.')
+
+ for gateway in result:
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ converted_gateway = camel_dict_to_snake_dict(gateway)
+ if 'tags' in converted_gateway:
+ # Turn the boto3 result into ansible friendly tag dictionary
+ converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+ nat_gateways.append(converted_gateway)
+
+ return nat_gateways
+
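+# Illustrative sketch (hypothetical values): how user filters reach boto3 and
+# how tags come back.
+#
+#   module.params = {'filters': {'subnet-id': 'subnet-098c447465d4344f9'},
+#                    'nat_gateway_ids': []}
+#   get_nat_gateways(connection, module)
+#   # -> [{'nat_gateway_id': 'nat-0c242a2397acf6173',
+#   #      'subnet_id': 'subnet-098c447465d4344f9',
+#   #      'state': 'available',
+#   #      'tags': {'Tag1': 'tag1'}, ...}]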
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict'),
+ nat_gateway_ids=dict(default=[], type='list', elements='str'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ try:
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = get_nat_gateways(connection, module)
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 00000000..c7430e98
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,720 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+version_added: 1.0.0
+short_description: Configure AWS Virtual Private Clouds
+description:
+ - Create, modify, and terminate AWS Virtual Private Clouds (VPCs).
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with I(cidr_block)
+ to determine if a VPC already exists.
+ - The value of I(name) overrides any value set for C(Name) in the I(tags)
+ parameter.
+ - At least one of I(name) and I(vpc_id) must be specified.
+ - I(name) must be specified when creating a new VPC.
+ type: str
+ vpc_id:
+ version_added: 4.0.0
+ description:
+ - The ID of the VPC.
+ - At least one of I(name) and I(vpc_id) must be specified.
+ - At least one of I(name) and I(cidr_block) must be specified.
+ type: str
+ cidr_block:
+ description:
+ - The primary CIDR of the VPC.
+ - The first in the list will be used as the primary CIDR
+ and is used in conjunction with I(name) to ensure idempotence.
+ - Required when I(vpc_id) is not set.
+ type: list
+ elements: str
+ ipv6_cidr:
+ description:
+ - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
+ or the size of the CIDR block.
+ - Default value is C(false) when creating a new VPC.
+ type: bool
+ purge_cidrs:
+ description:
+ - Remove CIDRs that are associated with the VPC and are not specified in I(cidr_block).
+ default: false
+ type: bool
+ tenancy:
+ description:
+ - Whether to use default or dedicated tenancy.
+ - This cannot be changed after the VPC has been created.
+ default: default
+ choices: [ 'default', 'dedicated' ]
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ - Default value is C(true) when creating a new VPC.
+ type: bool
+ dns_hostnames:
+ description:
+ - Whether to enable AWS hostname support.
+ - Default value is C(true) when creating a new VPC.
+ type: bool
+ dhcp_opts_id:
+ description:
+ - The id of the DHCP options to use for this VPC.
+ type: str
+ state:
+ description:
+ - The state of the VPC. Either absent or present.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ multi_ok:
+ description:
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block.
+ Specify I(multi_ok=true) if you want duplicate VPCs created.
+ type: bool
+ default: false
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ region: us-east-1
+ tags:
+ module: ec2_vpc_net
+ this: works
+ tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ ipv6_cidr: true
+ region: us-east-1
+ tenancy: dedicated
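+
+# A sketch based on the options documented above; the VPC name and CIDRs are
+# illustrative placeholders.
+- name: associate an additional CIDR and purge any others not listed
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block:
+ - 10.10.0.0/16
+ - 10.20.0.0/16
+ purge_cidrs: true
+
+- name: delete the VPC
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ state: absent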
+'''
+
+RETURN = '''
+vpc:
+ description: info about the VPC that was created or deleted
+ returned: always
+ type: complex
+ contains:
+ cidr_block:
+ description: The CIDR of the VPC
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ cidr_block_association_set:
+ description: IPv4 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "cidr_block": "10.0.0.0/24",
+ "cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ classic_link_enabled:
+ description: indicates whether ClassicLink is enabled
+ returned: always
+ type: bool
+ sample: false
+ dhcp_options_id:
+ description: the id of the DHCP options associated with this VPC
+ returned: always
+ type: str
+ sample: dopt-12345678
+ id:
+ description: VPC resource id
+ returned: always
+ type: str
+ sample: vpc-12345678
+ name:
+ description: The Name tag of the VPC.
+ returned: When the Name tag has been set on the VPC
+ type: str
+ sample: MyVPC
+ version_added: 4.0.0
+ instance_tenancy:
+ description: indicates whether VPC uses default or dedicated tenancy
+ returned: always
+ type: str
+ sample: default
+ ipv6_cidr_block_association_set:
+ description: IPv6 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "ipv6_cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "ipv6_cidr_block": "2001:db8::/56",
+ "ipv6_cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ is_default:
+ description: indicates whether this is the default VPC
+ returned: always
+ type: bool
+ sample: false
+ state:
+ description: state of the VPC
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the VPC, includes name
+ returned: always
+ type: complex
+ contains:
+ Name:
+ description: name tag for the VPC
+ returned: always
+ type: str
+ sample: pk_vpc4
+ owner_id:
+ description: The AWS account which owns the VPC.
+ returned: always
+ type: str
+ sample: 123456789012
+'''
+
+from time import sleep
+from time import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.network import to_subnet
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+ """Returns None or a vpc object depending on the existence of a VPC. When supplied
+ with a CIDR, it will check for matching tags to determine if it is a match
+ otherwise it will assume the VPC does not exist and thus return None.
+ """
+ try:
+ vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block})
+ matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+ # If an exact match using the list of CIDRs isn't found, check for a match with the first CIDR, as documented for C(cidr_block)
+ if not matching_vpcs:
+ vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]})
+ matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ if multi:
+ return None
+ elif len(matching_vpcs) == 1:
+ return matching_vpcs[0]['VpcId']
+ elif len(matching_vpcs) > 1:
+ module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+ 'CIDR block you specified. If you would like to create '
+ 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
+ return None
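+
+# For reference, ansible_dict_to_boto3_filter_list() above expands the filter
+# dict into the boto3 Filters structure, e.g. (illustrative values):
+# [{'Name': 'tag:Name', 'Values': ['my-vpc']},
+# {'Name': 'cidr-block', 'Values': ['10.0.0.0/16']}]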
+
+
+def get_classic_link_status(module, connection, vpc_id):
+ try:
+ results = connection.describe_vpc_classic_link(aws_retry=True, VpcIds=[vpc_id])
+ return results['Vpcs'][0].get('ClassicLinkEnabled')
+ except is_boto3_error_message('The functionality you requested is not available in this region.'):
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+
+def wait_for_vpc_to_exist(module, connection, **params):
+ # wait for vpc to be available
+ try:
+ get_waiter(connection, 'vpc_exists').wait(**params)
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="VPC failed to reach expected state (exists)")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC creation.")
+
+
+def wait_for_vpc(module, connection, **params):
+ # wait for vpc to be available
+ try:
+ get_waiter(connection, 'vpc_available').wait(**params)
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="VPC failed to reach expected state (available)")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC state to update.")
+
+
+def get_vpc(module, connection, vpc_id, wait=True):
+ wait_for_vpc(module, connection, VpcIds=[vpc_id])
+ try:
+ vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ vpc_obj['ClassicLinkEnabled'] = get_classic_link_status(module, connection, vpc_id)
+
+ return vpc_obj
+
+
+def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags):
+ # Name is a tag rather than a direct parameter, so we need to inject 'Name'
+ # into tags. When tags isn't explicitly passed, treat it not being set as
+ # purge_tags == False so that existing tags aren't removed just to set Name.
+ if name:
+ if purge_tags and tags is None:
+ purge_tags = False
+ tags = tags or {}
+ tags.update({'Name': name})
+
+ if tags is None:
+ return False
+
+ changed = ensure_ec2_tags(connection, module, vpc_id, tags=tags, purge_tags=purge_tags)
+ if not changed or module.check_mode:
+ return changed
+
+ return True
+
+
+def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ if dhcp_id is None:
+ return False
+ if vpc_obj['DhcpOptionsId'] == dhcp_id:
+ return False
+ if module.check_mode:
+ return True
+
+ try:
+ connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+ return True
+
+
+def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name):
+ if module.check_mode:
+ module.exit_json(changed=True, msg="VPC would be created if not in check mode")
+
+ create_args = dict(
+ CidrBlock=cidr_block, InstanceTenancy=tenancy,
+ )
+
+ if name:
+ tags = tags or {}
+ tags['Name'] = name
+ if tags:
+ create_args['TagSpecifications'] = boto3_tag_specifications(tags, 'vpc')
+
+ # Defaults to False (including None)
+ if ipv6_cidr:
+ create_args['AmazonProvidedIpv6CidrBlock'] = True
+
+ try:
+ vpc_obj = connection.create_vpc(aws_retry=True, **create_args)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to create the VPC")
+
+ # wait up to 30 seconds for vpc to exist
+ wait_for_vpc_to_exist(
+ module, connection,
+ VpcIds=[vpc_obj['Vpc']['VpcId']],
+ WaiterConfig=dict(MaxAttempts=30)
+ )
+ # Wait for the VPC to enter an 'Available' State
+ wait_for_vpc(
+ module, connection,
+ VpcIds=[vpc_obj['Vpc']['VpcId']],
+ WaiterConfig=dict(MaxAttempts=30)
+ )
+
+ return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+ if expected_value is None:
+ return
+ if module.check_mode:
+ return
+
+ start_time = time()
+ updated = False
+ while time() < start_time + 300:
+ current_value = connection.describe_vpc_attribute(
+ Attribute=attribute,
+ VpcId=vpc_id,
+ aws_retry=True
+ )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+ if current_value != expected_value:
+ sleep(3)
+ else:
+ updated = True
+ break
+ if not updated:
+ module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state):
+ """
+ If ipv6_assoc_state is True, wait for VPC to be associated with at least one Amazon-provided IPv6 CIDR block.
+ If ipv6_assoc_state is False, wait for VPC to be dissociated from all Amazon-provided IPv6 CIDR blocks.
+ """
+
+ if ipv6_assoc_state is None:
+ return
+ if module.check_mode:
+ return
+
+ start_time = time()
+ criteria_match = False
+ while time() < start_time + 300:
+ current_value = get_vpc(module, connection, vpc_id)
+ if current_value:
+ ipv6_set = current_value.get('Ipv6CidrBlockAssociationSet')
+ if ipv6_set:
+ if ipv6_assoc_state:
+ # At least one 'Amazon' IPv6 CIDR block must be associated.
+ for val in ipv6_set:
+ if val.get('Ipv6Pool') == 'Amazon' and val.get("Ipv6CidrBlockState").get("State") == "associated":
+ criteria_match = True
+ break
+ if criteria_match:
+ break
+ else:
+ # All 'Amazon' IPv6 CIDR blocks must be disassociated.
+ expected_count = sum(
+ val.get("Ipv6Pool") == "Amazon" for val in ipv6_set)
+ actual_count = sum(
+ val.get("Ipv6Pool") == "Amazon" and
+ val.get("Ipv6CidrBlockState").get("State") == "disassociated"
+ for val in ipv6_set)
+ if actual_count == expected_count:
+ criteria_match = True
+ break
+ sleep(3)
+ if not criteria_match:
+ module.fail_json(msg="Failed to wait for IPv6 CIDR association")
+
+
+def get_cidr_network_bits(module, cidr_block):
+ if cidr_block is None:
+ return None
+
+ fixed_cidrs = []
+ for cidr in cidr_block:
+ split_addr = cidr.split('/')
+ if len(split_addr) == 2:
+ # cidr is an IPv4 CIDR that may or may not have host bits set;
+ # get the network bits.
+ valid_cidr = to_subnet(split_addr[0], split_addr[1])
+ if cidr != valid_cidr:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+ fixed_cidrs.append(valid_cidr)
+ else:
+ # let AWS handle invalid CIDRs
+ fixed_cidrs.append(cidr)
+ return fixed_cidrs
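+
+# An illustrative example of the normalisation above, assuming the behaviour of
+# ansible.module_utils.common.network.to_subnet:
+# to_subnet('10.1.2.3', 24) -> '10.1.2.0/24', so '10.1.2.3/24' is rewritten to
+# '10.1.2.0/24' and a warning is emitted.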
+
+
+def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr):
+ if ipv6_cidr is None:
+ return False
+
+ # Fetch current state from vpc_object
+ current_ipv6_cidr = False
+ if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+ for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']:
+ if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']:
+ current_ipv6_cidr = True
+ break
+
+ if ipv6_cidr == current_ipv6_cidr:
+ return False
+
+ if module.check_mode:
+ return True
+
+ # Either associate a new Amazon-provided block, or disassociate the existing ones
+ if ipv6_cidr:
+ try:
+ connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate IPv6 CIDR")
+ else:
+ for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']:
+ if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']:
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc['AssociationId'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate IPv6 CIDR {0}.".format(ipv6_assoc['AssociationId']))
+ return True
+
+
+def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs):
+ if cidr_block is None:
+ return False, None
+
+ associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+ if cidr['CidrBlockState']['State'] not in ['disassociating', 'disassociated'])
+
+ current_cidrs = set(associated_cidrs.keys())
+ desired_cidrs = set(cidr_block)
+ if not purge_cidrs:
+ desired_cidrs = desired_cidrs.union(current_cidrs)
+
+ cidrs_to_add = list(desired_cidrs.difference(current_cidrs))
+ cidrs_to_remove = list(current_cidrs.difference(desired_cidrs))
+
+ if not cidrs_to_add and not cidrs_to_remove:
+ return False, None
+
+ if module.check_mode:
+ return True, list(desired_cidrs)
+
+ for cidr in cidrs_to_add:
+ try:
+ connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
+
+ for cidr in cidrs_to_remove:
+ association_id = associated_cidrs[cidr]
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+ "are associated with the CIDR block before you can disassociate it.".format(association_id))
+ return True, list(desired_cidrs)
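+
+# For example, with currently associated CIDRs {'10.0.0.0/16', '10.1.0.0/16'}
+# and cidr_block=['10.0.0.0/16']: purge_cidrs=true disassociates 10.1.0.0/16,
+# while purge_cidrs=false (the default) leaves both CIDRs associated.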
+
+
+def update_dns_enabled(connection, module, vpc_id, dns_support):
+ if dns_support is None:
+ return False
+
+ current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+ if current_dns_enabled == dns_support:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+ return True
+
+
+def update_dns_hostnames(connection, module, vpc_id, dns_hostnames):
+ if dns_hostnames is None:
+ return False
+
+ current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+ if current_dns_hostnames == dns_hostnames:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+ return True
+
+
+def delete_vpc(connection, module, vpc_id):
+ if vpc_id is None:
+ return False
+ if module.check_mode:
+ return True
+
+ try:
+ connection.delete_vpc(VpcId=vpc_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+ "and/or ec2_vpc_route_table modules to ensure that all depenednt components are absent.".format(vpc_id)
+ )
+
+ return True
+
+
+def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_support, dns_hostnames, tags, dhcp_id):
+
+ if module.check_mode:
+ return
+
+ if expected_cidrs:
+ wait_for_vpc(
+ module, connection,
+ VpcIds=[vpc_id],
+ Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+ )
+ wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_cidr)
+
+ if tags is not None:
+ tag_list = ansible_dict_to_boto3_tag_list(tags)
+ filters = [{'Name': 'tag:{0}'.format(t['Key']), 'Values': [t['Value']]} for t in tag_list]
+ wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters)
+
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+ if dhcp_id is not None:
+ # Wait for DhcpOptionsId to be updated
+ filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+ wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters)
+
+ return
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=False),
+ vpc_id=dict(type='str', required=False, default=None),
+ cidr_block=dict(type='list', elements='str'),
+ ipv6_cidr=dict(type='bool', default=None),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool'),
+ dns_hostnames=dict(type='bool'),
+ dhcp_opts_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False),
+ purge_cidrs=dict(type='bool', default=False),
+ )
+ required_one_of = [
+ ['vpc_id', 'name'],
+ ['vpc_id', 'cidr_block'],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=required_one_of,
+ supports_check_mode=True
+ )
+
+ name = module.params.get('name')
+ vpc_id = module.params.get('vpc_id')
+ cidr_block = module.params.get('cidr_block')
+ ipv6_cidr = module.params.get('ipv6_cidr')
+ purge_cidrs = module.params.get('purge_cidrs')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
+
+ changed = False
+
+ connection = module.client(
+ 'ec2',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+ ),
+ )
+
+ if dns_hostnames and not dns_support:
+ module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+ cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+
+ if vpc_id is None:
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if state == 'present':
+
+ # Check if VPC exists
+ if vpc_id is None:
+ if module.params.get('name') is None:
+ module.fail_json('The name parameter must be specified when creating a new VPC.')
+ vpc_id = create_vpc(connection, module, cidr_block[0], tenancy, tags, ipv6_cidr, name)
+ changed = True
+ vpc_obj = get_vpc(module, connection, vpc_id)
+ if len(cidr_block) > 1:
+ cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs)
+ changed |= cidrs_changed
+ else:
+ desired_cidrs = None
+ # Set on-creation defaults
+ if dns_hostnames is None:
+ dns_hostnames = True
+ if dns_support is None:
+ dns_support = True
+ else:
+ vpc_obj = get_vpc(module, connection, vpc_id)
+ cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs)
+ changed |= cidrs_changed
+ ipv6_changed = update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr)
+ changed |= ipv6_changed
+ tags_changed = update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags)
+ changed |= tags_changed
+
+ dhcp_changed = update_dhcp_opts(connection, module, vpc_obj, dhcp_id)
+ changed |= dhcp_changed
+ dns_changed = update_dns_enabled(connection, module, vpc_id, dns_support)
+ changed |= dns_changed
+ hostnames_changed = update_dns_hostnames(connection, module, vpc_id, dns_hostnames)
+ changed |= hostnames_changed
+
+ wait_for_updates(connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id)
+
+ updated_obj = get_vpc(module, connection, vpc_id)
+ final_state = camel_dict_to_snake_dict(updated_obj)
+ final_state['tags'] = boto3_tag_list_to_ansible_dict(updated_obj.get('Tags', []))
+ final_state['name'] = final_state['tags'].get('Name', None)
+ final_state['id'] = final_state.pop('vpc_id')
+
+ module.exit_json(changed=changed, vpc=final_state)
+
+ elif state == 'absent':
+ changed = delete_vpc(connection, module, vpc_id)
+ module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py
new file mode 100644
index 00000000..eb10b957
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net_info
+version_added: 1.0.0
+short_description: Gather information about ec2 VPCs in AWS
+description:
+ - Gather information about ec2 VPCs in AWS.
+author: "Rob White (@wimnat)"
+options:
+ vpc_ids:
+ description:
+ - A list of VPC IDs that exist in your account.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPCs
+- amazon.aws.ec2_vpc_net_info:
+
+# Gather information about a particular VPC using VPC ID
+- amazon.aws.ec2_vpc_net_info:
+ vpc_ids: vpc-00112233
+
+# Gather information about any VPC with a tag key Name and value Example
+- amazon.aws.ec2_vpc_net_info:
+ filters:
+ "tag:Name": Example
+
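+# Gather information about several VPCs and filter on state, registering the
+# result (a sketch using the documented vpc_ids and filters options; IDs are
+# placeholders)
+- amazon.aws.ec2_vpc_net_info:
+ vpc_ids:
+ - vpc-00112233
+ - vpc-44556677
+ filters:
+ state: available
+ register: vpc_info
+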
+'''
+
+RETURN = '''
+vpcs:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The ID of the VPC (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the VPC.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the VPC.
+ returned: always
+ type: dict
+ instance_tenancy:
+ description: The instance tenancy setting for the VPC.
+ returned: always
+ type: str
+ is_default:
+ description: True if this is the default VPC for account.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the VPC.
+ returned: always
+ type: str
+ classic_link_dns_supported:
+ description: True/False depending on attribute setting for classic link DNS support.
+ returned: always
+ type: bool
+ classic_link_enabled:
+ description: True/False depending on if classic link support is enabled.
+ returned: always
+ type: bool
+ enable_dns_hostnames:
+ description: True/False depending on attribute setting for DNS hostnames support.
+ returned: always
+ type: bool
+ enable_dns_support:
+ description: True/False depending on attribute setting for DNS support.
+ returned: always
+ type: bool
+ cidr_block_association_set:
+ description: An array of IPv4 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID.
+ returned: always
+ type: str
+ cidr_block:
+ description: The IPv4 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ cidr_block_state:
+ description: A dict that contains a single item, the state of the CIDR block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID.
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A dict that contains a single item, the state of the CIDR block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ owner_id:
+ description: The AWS account which owns the VPC.
+ returned: always
+ type: str
+ sample: 123456789012
+ dhcp_options_id:
+ description: The ID of the DHCP options associated with this VPC.
+ returned: always
+ type: str
+ sample: dopt-12345678
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def describe_vpcs(connection, module):
+ """
+ Describe VPCs.
+
+ connection : boto3 client connection object
+ module : AnsibleAWSModule object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ vpc_ids = module.params.get('vpc_ids')
+
+ # init empty list for return vars
+ vpc_info = list()
+
+ # Get the basic VPC info
+ try:
+ response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids))
+
+ # These results can be fetched in bulk, but each attribute still needs its own API call
+ cl_enabled = {}
+ cl_dns_support = {}
+ dns_support = {}
+ dns_hostnames = {}
+ # Loop through the results and add the other VPC attributes we gathered
+ for vpc in response['Vpcs']:
+ error_message = "Unable to describe VPC attribute {0} on VPC {1}"
+ cl_enabled = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkEnabled', error_message)
+ cl_dns_support = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkDnsSupported', error_message)
+ dns_support = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsSupport', error_message)
+ dns_hostnames = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsHostnames', error_message)
+ if cl_enabled:
+ # loop through the ClassicLink Enabled results and add the value for the correct VPC
+ for item in cl_enabled['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkEnabled'] = item.get('ClassicLinkEnabled', False)
+ if cl_dns_support:
+ # loop through the ClassicLink DNS support results and add the value for the correct VPC
+ for item in cl_dns_support['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkDnsSupported'] = item.get('ClassicLinkDnsSupported', False)
+
+ # add the two DNS attributes
+ if dns_support:
+ vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
+ if dns_hostnames:
+ vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
+ # for backwards compatibility
+ vpc['id'] = vpc['VpcId']
+ vpc_info.append(camel_dict_to_snake_dict(vpc))
+ # convert tag list to ansible dict
+ vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
+
+ module.exit_json(vpcs=vpc_info)
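+
+# Each entry in vpc_info is the snake_cased VPC dict described in RETURN, e.g.
+# (abridged, illustrative):
+# {'id': 'vpc-00112233', 'vpc_id': 'vpc-00112233', 'state': 'available',
+# 'cidr_block': '10.0.0.0/16', 'enable_dns_support': True, 'tags': {'Name': 'Example'}}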
+
+
+def describe_classic_links(module, connection, vpc, attribute, error_message):
+ result = None
+ try:
+ if attribute == "ClassicLinkEnabled":
+ result = connection.describe_vpc_classic_link(VpcIds=[vpc], aws_retry=True)
+ else:
+ result = connection.describe_vpc_classic_link_dns_support(VpcIds=[vpc], aws_retry=True)
+ except is_boto3_error_code('UnsupportedOperation'):
+ result = {'Vpcs': [{'VpcId': vpc}]}
+ except is_boto3_error_code('InvalidVpcID.NotFound'):
+ module.warn(error_message.format(attribute, vpc))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Unable to describe if {0} is enabled'.format(attribute))
+ return result
+
+
+def describe_vpc_attribute(module, connection, vpc, attribute, error_message):
+ result = None
+ try:
+ return connection.describe_vpc_attribute(VpcId=vpc, Attribute=attribute, aws_retry=True)
+ except is_boto3_error_code('InvalidVpcID.NotFound'):
+ module.warn(error_message.format(attribute, vpc))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=error_message.format(attribute, vpc))
+ return result
+
+
+def main():
+ argument_spec = dict(
+ vpc_ids=dict(type='list', elements='str', default=[]),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ describe_vpcs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
new file mode 100644
index 00000000..7a9f1aa8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
@@ -0,0 +1,842 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_route_table
+version_added: 1.0.0
+short_description: Manage route tables for AWS Virtual Private Clouds
+description:
+ - Manage route tables for AWS Virtual Private Clouds (VPCs).
+author:
+ - Robert Estelle (@erydo)
+ - Rob White (@wimnat)
+ - Will Thames (@willthames)
+options:
+ gateway_id:
+ description:
+ - The ID of the gateway to associate with the route table.
+ - If I(gateway_id) is C('None') or C(''), the gateway will be disassociated from the route table.
+ type: str
+ version_added: 3.2.0
+ lookup:
+ description:
+ - Look up route table by either I(tags) or by I(route_table_id).
+ - If I(lookup=tag) and I(tags) is not specified then no lookup for an
+ existing route table is performed and a new route table will be created.
+ - When using I(lookup=tag), multiple matches being found will result in
+ a failure and no changes will be made.
+ - To change the tags of a route table use I(lookup=id).
+ - I(vpc_id) must be specified when I(lookup=tag).
+ default: tag
+ choices: [ 'tag', 'id' ]
+ type: str
+ propagating_vgw_ids:
+ description: Enable route propagation from virtual gateways specified by ID.
+ type: list
+ elements: str
+ purge_routes:
+ description: Purge existing routes that are not found in routes.
+ type: bool
+ default: True
+ purge_subnets:
+ description:
+ - Purge existing subnets that are not found in subnets.
+ - Ignored unless the subnets option is supplied.
+ default: True
+ type: bool
+ route_table_id:
+ description:
+ - The ID of the route table to update or delete.
+ - Required when I(lookup=id).
+ type: str
+ routes:
+ description:
+ - List of routes in the route table.
+ - Routes are specified as dicts containing the keys C(dest) and one of C(gateway_id),
+ C(instance_id), C(network_interface_id), or C(vpc_peering_connection_id).
+ - The value of C(dest) is used for the destination match. It may be an IPv4 CIDR block
+ or an IPv6 CIDR block.
+ - If I(gateway_id) is specified, you can refer to the VPC's IGW by using the value C(igw).
+ - Required when I(state=present).
+ type: list
+ elements: dict
+ state:
+ description: Create or destroy the VPC route table.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnets:
+ description: An array of subnets to add to this route table. Subnets may be specified
+ by subnet ID, by Name tag, or by a CIDR such as '10.0.0.0/24' or 'fd00::/8'.
+ type: list
+ elements: str
+ vpc_id:
+ description:
+ - VPC ID of the VPC in which to create the route table.
+ - Required when I(state=present) or I(lookup=tag).
+ type: str
+notes:
+ - Tags are used to uniquely identify route tables within a VPC when the I(route_table_id) is not supplied.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ - dest: ::/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Create VPC gateway
+ amazon.aws.ec2_vpc_igw:
+ vpc_id: vpc-1245678
+ register: vpc_igw
+
+- name: Create gateway route table
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ tags:
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ register: gateway_route_table
+
+- name: Disassociate gateway from route table
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ tags:
+ Name: Gateway route table
+ gateway_id: None
+ register: gateway_route_table
+
+- name: Set up NAT-protected route table
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+- name: delete route table
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ route_table_id: "{{ route_table.id }}"
+ lookup: id
+ state: absent
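+
+# A sketch showing route propagation from a virtual private gateway; the VGW ID
+# is an illustrative placeholder (propagating_vgw_ids is documented above)
+- name: Set up route table with propagated routes
+ amazon.aws.ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ tags:
+ Name: Propagated
+ propagating_vgw_ids:
+ - vgw-0123456789abcdef0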
+'''
+
+RETURN = r'''
+route_table:
+ description: Route Table result.
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of associations between the route table and one or more subnets or a gateway.
+ returned: always
+ type: complex
+ contains:
+ association_state:
+ description: The state of the association.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the association.
+ returned: always
+ type: str
+ sample: associated
+ state_message:
+ description: Additional information about the state of the association.
+ returned: when available
+ type: str
+ sample: 'Creating association'
+ gateway_id:
+ description: ID of the internet gateway or virtual private gateway.
+ returned: when route table is a gateway route table
+ type: str
+ sample: igw-03312309
+ main:
+ description: Whether this is the main route table.
+ returned: always
+ type: bool
+ sample: false
+ route_table_association_id:
+ description: ID of association between route table and subnet.
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table.
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet.
+ returned: when route table is a subnet route table
+ type: str
+ sample: subnet-82055af9
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility).
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes.
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table.
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table.
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: IPv4 CIDR block of the destination.
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ destination_ipv6_cidr_block:
+ description: IPv6 CIDR block of the destination.
+ returned: when the route includes an IPv6 destination
+ type: str
+ sample: 2600:1f1c:1b3:8f00:8000::/65
+ gateway_id:
+ description: ID of the gateway.
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description: ID of a NAT instance.
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description: AWS account owning the NAT instance.
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: 123456789012
+ nat_gateway_id:
+ description: ID of the NAT gateway.
+ returned: when the route is via a NAT gateway
+ type: str
+ sample: nat-0123456789abcdef0
+ origin:
+ description: The mechanism through which the route is in the table.
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+ description: The state of the route.
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table.
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives.
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
+'''
+
+import re
+from time import sleep
+from ipaddress import ip_network
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+@AWSRetry.jittered_backoff()
+def describe_subnets_with_backoff(connection, **params):
+ paginator = connection.get_paginator('describe_subnets')
+ return paginator.paginate(**params).build_full_result()['Subnets']
+
+
+@AWSRetry.jittered_backoff()
+def describe_igws_with_backoff(connection, **params):
+ paginator = connection.get_paginator('describe_internet_gateways')
+ return paginator.paginate(**params).build_full_result()['InternetGateways']
+
+
+@AWSRetry.jittered_backoff()
+def describe_route_tables_with_backoff(connection, **params):
+ try:
+ paginator = connection.get_paginator('describe_route_tables')
+ return paginator.paginate(**params).build_full_result()['RouteTables']
+ except is_boto3_error_code('InvalidRouteTableID.NotFound'):
+ return None
+
+
+def find_subnets(connection, module, vpc_id, identified_subnets):
+ """
+ Finds a list of subnets, each identified either by a raw ID, a unique
+ 'Name' tag, or a CIDR such as 10.0.0.0/8.
+ """
+ CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
+ SUBNET_RE = re.compile(r'^subnet-[a-z0-9]+$')
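+ # e.g. 'subnet-0abc1234' matches SUBNET_RE, '10.0.0.0/24' matches CIDR_RE,
+ # and anything else is treated as a Name tag below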
+
+ subnet_ids = []
+ subnet_names = []
+ subnet_cidrs = []
+ for subnet in (identified_subnets or []):
+ if re.match(SUBNET_RE, subnet):
+ subnet_ids.append(subnet)
+ elif re.match(CIDR_RE, subnet):
+ subnet_cidrs.append(subnet)
+ else:
+ subnet_names.append(subnet)
+
+ subnets_by_id = []
+ if subnet_ids:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)
+
+ subnets_by_cidr = []
+ if subnet_cidrs:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
+ try:
+ subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)
+
+ subnets_by_name = []
+ if subnet_names:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
+ try:
+ subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)
+
+ for name in subnet_names:
+ matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
+ if matching_count == 0:
+ module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
+ elif matching_count > 1:
+ module.fail_json(msg='Multiple subnets named "{0}"'.format(name))
+
+ return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
+def find_igw(connection, module, vpc_id):
+ """
+ Finds the Internet gateway for the given VPC ID.
+ """
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ try:
+ igw = describe_igws_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error describing internet gateways for VPC {0}'.format(vpc_id))
+ if len(igw) == 1:
+ return igw[0]['InternetGatewayId']
+ elif len(igw) == 0:
+ module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
+ else:
+ module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))
+
+
+def tags_match(match_tags, candidate_tags):
+ return all((k in candidate_tags and candidate_tags[k] == v
+ for k, v in match_tags.items()))
+
+
+def get_route_table_by_id(connection, module, route_table_id):
+
+ route_table = None
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ if route_tables:
+ route_table = route_tables[0]
+
+ return route_table
+
+
+def get_route_table_by_tags(connection, module, vpc_id, tags):
+ count = 0
+ route_table = None
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ for table in route_tables:
+ this_tags = describe_ec2_tags(connection, module, table['RouteTableId'])
+ if tags_match(tags, this_tags):
+ route_table = table
+ count += 1
+
+ if count > 1:
+ module.fail_json(msg="Tags provided do not identify a unique route table")
+ else:
+ return route_table
+
+
+def route_spec_matches_route(route_spec, route):
+ if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']:
+ route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
+ if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']:
+ if route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
+ route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')
+
+ return set(route_spec.items()).issubset(route.items())
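+
+# Matching above is subset-based: a spec such as
+# {'DestinationCidrBlock': '0.0.0.0/0', 'GatewayId': 'igw-012345'} (illustrative)
+# matches a route carrying those items plus extra keys like 'State' and 'Origin'.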
+
+
+def route_spec_matches_route_cidr(route_spec, route):
+ if route_spec.get('DestinationCidrBlock') and route.get('DestinationCidrBlock'):
+ return route_spec.get('DestinationCidrBlock') == route.get('DestinationCidrBlock')
+ if route_spec.get('DestinationIpv6CidrBlock') and route.get('DestinationIpv6CidrBlock'):
+ return route_spec.get('DestinationIpv6CidrBlock') == route.get('DestinationIpv6CidrBlock')
+ return False
+
+
+def rename_key(d, old_key, new_key):
+ d[new_key] = d.pop(old_key)
+
+
+def index_of_matching_route(route_spec, routes_to_match):
+ for i, route in enumerate(routes_to_match):
+ if route_spec_matches_route(route_spec, route):
+ return "exact", i
+ elif 'Origin' in route and route['Origin'] != 'EnableVgwRoutePropagation': # only replace created routes
+ if route_spec_matches_route_cidr(route_spec, route):
+ return "replace", i
+
+
+def ensure_routes(connection, module, route_table, route_specs, purge_routes):
+ routes_to_match = list(route_table['Routes'])
+ route_specs_to_create = []
+ route_specs_to_recreate = []
+ for route_spec in route_specs:
+ match = index_of_matching_route(route_spec, routes_to_match)
+ if match is None:
+ if route_spec.get('DestinationCidrBlock') or route_spec.get('DestinationIpv6CidrBlock'):
+ route_specs_to_create.append(route_spec)
+ else:
+ module.warn("Skipping creating {0} because it has no destination cidr block. "
+ "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
+ else:
+ if match[0] == "replace":
+ if route_spec.get('DestinationCidrBlock'):
+ route_specs_to_recreate.append(route_spec)
+ else:
+ module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
+ del routes_to_match[match[1]]
+
+ routes_to_delete = []
+ if purge_routes:
+ for route in routes_to_match:
+ if not route.get('DestinationCidrBlock'):
+ module.warn("Skipping purging route {0} because it has no destination cidr block. "
+ "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(route))
+ continue
+ if route['Origin'] == 'CreateRoute':
+ routes_to_delete.append(route)
+
+ changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
+ if changed and not module.check_mode:
+ for route in routes_to_delete:
+ try:
+ connection.delete_route(
+ aws_retry=True,
+ RouteTableId=route_table['RouteTableId'],
+ DestinationCidrBlock=route['DestinationCidrBlock'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete route")
+
+ for route_spec in route_specs_to_recreate:
+ try:
+ connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't recreate route")
+
+ for route_spec in route_specs_to_create:
+ try:
+ connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec)
+ except is_boto3_error_code('RouteAlreadyExists'):
+ changed = False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't create route")
+
+ return changed
+
+
+def ensure_subnet_association(connection, module, vpc_id, route_table_id, subnet_id):
+ filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route tables")
+ for route_table in route_tables:
+ if route_table.get('RouteTableId'):
+ for association in route_table['Associations']:
+ if association['Main']:
+ continue
+ if association['SubnetId'] == subnet_id:
+ if route_table['RouteTableId'] == route_table_id:
+ return {'changed': False, 'association_id': association['RouteTableAssociationId']}
+ if module.check_mode:
+ return {'changed': True}
+ try:
+ connection.disassociate_route_table(
+ aws_retry=True, AssociationId=association['RouteTableAssociationId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ if module.check_mode:
+ return {'changed': True}
+ try:
+ association_id = connection.associate_route_table(aws_retry=True,
+ RouteTableId=route_table_id,
+ SubnetId=subnet_id)['AssociationId']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate subnet with route table")
+ return {'changed': True, 'association_id': association_id}
+
+
+def ensure_subnet_associations(connection, module, route_table, subnets, purge_subnets):
+ current_association_ids = [association['RouteTableAssociationId'] for association in route_table['Associations']
+ if not association['Main'] and association.get('SubnetId')]
+ new_association_ids = []
+ changed = False
+ for subnet in subnets:
+ result = ensure_subnet_association(
+ connection=connection, module=module, vpc_id=route_table['VpcId'],
+ route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'])
+ changed = changed or result['changed']
+ if changed and module.check_mode:
+ return True
+ new_association_ids.append(result['association_id'])
+
+ if purge_subnets:
+ to_delete = [association_id for association_id in current_association_ids
+ if association_id not in new_association_ids]
+ for association_id in to_delete:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.disassociate_route_table(aws_retry=True, AssociationId=association_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ return changed
+
+
+def disassociate_gateway(connection, module, route_table):
+ # Delete all gateway associations that have state = associated.
+ # Subnet associations are handled in ensure_subnet_associations().
+ changed = False
+ associations_to_delete = [association['RouteTableAssociationId'] for association in route_table['Associations'] if not association['Main']
+ and association.get('GatewayId') and association['AssociationState']['State'] in ['associated', 'associating']]
+ for association_id in associations_to_delete:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.disassociate_route_table(aws_retry=True, AssociationId=association_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table")
+
+ return changed
+
+
+def associate_gateway(connection, module, route_table, gateway_id):
+ filters = ansible_dict_to_boto3_filter_list({'association.gateway-id': gateway_id, 'vpc-id': route_table['VpcId']})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route tables")
+ for table in route_tables:
+ if table.get('RouteTableId'):
+ for association in table.get('Associations'):
+ if association['Main']:
+ continue
+ if association.get('GatewayId', '') == gateway_id and (association['AssociationState']['State'] in ['associated', 'associating']):
+ if table['RouteTableId'] == route_table['RouteTableId']:
+ return False
+ elif module.check_mode:
+ return True
+ else:
+ try:
+ connection.disassociate_route_table(
+ aws_retry=True, AssociationId=association['RouteTableAssociationId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table")
+
+ if not module.check_mode:
+ try:
+ connection.associate_route_table(aws_retry=True,
+ RouteTableId=route_table['RouteTableId'],
+ GatewayId=gateway_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate gateway with route table")
+ return True
+
+
+def ensure_propagation(connection, module, route_table, propagating_vgw_ids):
+ changed = False
+ gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']]
+ vgws_to_add = set(propagating_vgw_ids) - set(gateways)
+ if vgws_to_add:
+ changed = True
+ if not module.check_mode:
+ for vgw_id in vgws_to_add:
+ try:
+ connection.enable_vgw_route_propagation(
+ aws_retry=True,
+ RouteTableId=route_table['RouteTableId'],
+ GatewayId=vgw_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't enable route propagation")
+
+ return changed
+
+
+def ensure_route_table_absent(connection, module):
+
+ lookup = module.params.get('lookup')
+ route_table_id = module.params.get('route_table_id')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ purge_subnets = module.params.get('purge_subnets')
+
+ if lookup == 'tag':
+ if tags is not None:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ else:
+ route_table = None
+ elif lookup == 'id':
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+
+ if route_table is None:
+ return {'changed': False}
+
+ # disassociate subnets and gateway before deleting route table
+ if not module.check_mode:
+ ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=[], purge_subnets=purge_subnets)
+ disassociate_gateway(connection=connection, module=module, route_table=route_table)
+ try:
+ connection.delete_route_table(aws_retry=True, RouteTableId=route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error deleting route table")
+
+ return {'changed': True}
+
+
+def get_route_table_info(connection, module, route_table):
+ result = get_route_table_by_id(connection, module, route_table['RouteTableId'])
+ try:
+ result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for route table")
+ result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+ # backwards compatibility
+ result['id'] = result['route_table_id']
+ return result
+
+
+def create_route_spec(connection, module, vpc_id):
+ routes = module.params.get('routes')
+ for route_spec in routes:
+
+ cidr_block_type = str(type(ip_network(route_spec['dest'])))
+ if "IPv4" in cidr_block_type:
+ rename_key(route_spec, 'dest', 'destination_cidr_block')
+ if "IPv6" in cidr_block_type:
+ rename_key(route_spec, 'dest', 'destination_ipv6_cidr_block')
+
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
+ igw = find_igw(connection, module, vpc_id)
+ route_spec['gateway_id'] = igw
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
+ rename_key(route_spec, 'gateway_id', 'nat_gateway_id')
+
+ return snake_dict_to_camel_dict(routes, capitalize_first=True)
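+
+# For example, [{'dest': '0.0.0.0/0', 'gateway_id': 'igw'}] becomes
+# [{'DestinationCidrBlock': '0.0.0.0/0', 'GatewayId': 'igw-012345'}] after the
+# IGW lookup and the snake_case -> CamelCase conversion (IGW ID illustrative).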
+
+
+def ensure_route_table_present(connection, module):
+
+ gateway_id = module.params.get('gateway_id')
+ lookup = module.params.get('lookup')
+ propagating_vgw_ids = module.params.get('propagating_vgw_ids')
+ purge_routes = module.params.get('purge_routes')
+ purge_subnets = module.params.get('purge_subnets')
+ purge_tags = module.params.get('purge_tags')
+ route_table_id = module.params.get('route_table_id')
+ subnets = module.params.get('subnets')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ routes = create_route_spec(connection, module, vpc_id)
+
+ changed = False
+ tags_valid = False
+
+ if lookup == 'tag':
+ if tags is not None:
+ try:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
+ else:
+ route_table = None
+ elif lookup == 'id':
+ try:
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")
+
+ # If no route table returned then create new route table
+ if route_table is None:
+ changed = True
+ if not module.check_mode:
+ try:
+ route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable']
+ # try to wait for route table to be present before moving on
+ get_waiter(
+ connection, 'route_table_exists'
+ ).wait(
+ RouteTableIds=[route_table['RouteTableId']],
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout waiting for route table creation')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error creating route table")
+ else:
+ route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
+ module.exit_json(changed=changed, route_table=route_table)
+
+ if routes is not None:
+ result = ensure_routes(connection=connection, module=module, route_table=route_table,
+ route_specs=routes, purge_routes=purge_routes)
+ changed = changed or result
+
+ if propagating_vgw_ids is not None:
+ result = ensure_propagation(connection=connection, module=module, route_table=route_table,
+ propagating_vgw_ids=propagating_vgw_ids)
+ changed = changed or result
+
+ if not tags_valid and tags is not None:
+ changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'],
+ tags=tags, purge_tags=purge_tags,
+ retry_codes=['InvalidRouteTableID.NotFound'])
+ route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId'])
+
+ if subnets is not None:
+ associated_subnets = find_subnets(connection, module, vpc_id, subnets)
+ result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=associated_subnets, purge_subnets=purge_subnets)
+ changed = changed or result
+
+ if gateway_id == 'None' or gateway_id == '':
+ gateway_changed = disassociate_gateway(connection=connection, module=module, route_table=route_table)
+ elif gateway_id is not None:
+ gateway_changed = associate_gateway(connection=connection, module=module, route_table=route_table, gateway_id=gateway_id)
+ else:
+ gateway_changed = False
+
+ changed = changed or gateway_changed
+
+ if changed:
+ # pause to allow route table routes/subnets/associations to be updated before exiting with final state
+ sleep(5)
+ module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+
+def main():
+ argument_spec = dict(
+ gateway_id=dict(type='str'),
+ lookup=dict(default='tag', choices=['tag', 'id']),
+ propagating_vgw_ids=dict(type='list', elements='str'),
+ purge_routes=dict(default=True, type='bool'),
+ purge_subnets=dict(default=True, type='bool'),
+ purge_tags=dict(type='bool', default=True),
+ route_table_id=dict(),
+ routes=dict(default=[], type='list', elements='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ subnets=dict(type='list', elements='str'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ vpc_id=dict()
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['lookup', 'id', ['route_table_id']],
+ ['lookup', 'tag', ['vpc_id']],
+ ['state', 'present', ['vpc_id']]],
+ supports_check_mode=True)
+
+    # The checks for route table existence use their own retry decorator, so we
+    # can safely retry on InvalidRouteTableID.NotFound here.
+ retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound'])
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ result = ensure_route_table_present(connection, module)
+ elif state == 'absent':
+ result = ensure_route_table_absent(connection, module)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py
new file mode 100644
index 00000000..b7b3c69d
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_route_table_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPC route tables in AWS
+description:
+    - Gather information about EC2 VPC route tables in AWS.
+author:
+- "Rob White (@wimnat)"
+- "Mark Chappell (@tremble)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all VPC route tables
+ amazon.aws.ec2_vpc_route_table_info:
+
+- name: Gather information about a particular VPC route table using route table ID
+ amazon.aws.ec2_vpc_route_table_info:
+ filters:
+ route-table-id: rtb-00112233
+
+- name: Gather information about any VPC route table with a tag key Name and value Example
+ amazon.aws.ec2_vpc_route_table_info:
+ filters:
+ "tag:Name": Example
+
+- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00
+ amazon.aws.ec2_vpc_route_table_info:
+ filters:
+ vpc-id: vpc-abcdef00
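+
+# Illustrative follow-on (IDs are placeholders): register the result and reuse
+# the route table IDs later in the play.
+- name: Gather route tables for a VPC and collect their IDs
+  amazon.aws.ec2_vpc_route_table_info:
+    filters:
+      vpc-id: vpc-abcdef00
+  register: route_table_info
+
+- ansible.builtin.set_fact:
+    route_table_ids: "{{ route_table_info.route_tables | map(attribute='id') | list }}"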
+'''
+
+RETURN = r'''
+route_tables:
+ description:
+    - A list of dictionaries describing route tables.
+ - See also U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_route_tables).
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of associations between the route table and one or more subnets or a gateway.
+ returned: always
+ type: complex
+ contains:
+ association_state:
+ description: The state of the association.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: The state of the association.
+ returned: always
+ type: str
+ sample: associated
+ state_message:
+ description: Additional information about the state of the association.
+ returned: when available
+ type: str
+ sample: 'Creating association'
+ gateway_id:
+ description: ID of the internet gateway or virtual private gateway.
+ returned: when route table is a gateway route table
+ type: str
+ sample: igw-03312309
+ main:
+ description: Whether this is the main route table.
+ returned: always
+ type: bool
+ sample: false
+ route_table_association_id:
+ description: ID of association between route table and subnet.
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table.
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet.
+ returned: when route table is a subnet route table
+ type: str
+ sample: subnet-82055af9
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility).
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ owner_id:
+ description: ID of the account which owns the route table.
+ returned: always
+ type: str
+ sample: '012345678912'
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes.
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table.
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table.
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: CIDR block of destination.
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ gateway_id:
+ description: ID of the gateway.
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description:
+ - ID of a NAT instance.
+ - Empty unless the route is via an EC2 instance.
+ returned: always
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description:
+ - AWS account owning the NAT instance.
+ - Empty unless the route is via an EC2 instance.
+ returned: always
+ type: str
+ sample: 123456789012
+ network_interface_id:
+ description:
+ - The ID of the network interface.
+ - Empty unless the route is via an EC2 instance.
+ returned: always
+ type: str
+        sample: eni-0123456789abcdef0
+ nat_gateway_id:
+ description: ID of the NAT gateway.
+ returned: when the route is via a NAT gateway.
+ type: str
+        sample: nat-0123456789abcdef0
+ origin:
+        description: Mechanism by which the route was added to the table.
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+        description: State of the route.
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table.
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives.
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff()
+def describe_route_tables_with_backoff(connection, **params):
+ try:
+ paginator = connection.get_paginator('describe_route_tables')
+ return paginator.paginate(**params).build_full_result()
+ except is_boto3_error_code('InvalidRouteTableID.NotFound'):
+ return None
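+
+# Illustrative only: Filters uses the boto3 list-of-dicts shape, e.g.
+#   describe_route_tables_with_backoff(connection,
+#                                      Filters=[{'Name': 'vpc-id', 'Values': ['vpc-abcdef00']}])
+# build_full_result() aggregates every page of results into a single response dict.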
+
+
+def normalize_route(route):
+    # Historically these keys were always present, but were set to null when empty.
+ for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId',
+ 'Origin', 'State', 'NetworkInterfaceId']:
+ if legacy_key not in route:
+ route[legacy_key] = None
+ route['InterfaceId'] = route['NetworkInterfaceId']
+ return route
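+
+# Illustrative only: given a boto3 route such as
+#   {'DestinationCidrBlock': '0.0.0.0/0', 'GatewayId': 'igw-0123abcd', 'State': 'active'}
+# normalize_route() back-fills the missing legacy keys (InstanceId, Origin,
+# NetworkInterfaceId) with None and mirrors NetworkInterfaceId as InterfaceId.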
+
+
+def normalize_association(assoc):
+ # Name change between boto v2 and boto v3, return both
+ assoc['Id'] = assoc['RouteTableAssociationId']
+ return assoc
+
+
+def normalize_route_table(table):
+ table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags'])
+ table['Associations'] = [normalize_association(assoc) for assoc in table['Associations']]
+ table['Routes'] = [normalize_route(route) for route in table['Routes']]
+ table['Id'] = table['RouteTableId']
+ del table['Tags']
+ return camel_dict_to_snake_dict(table, ignore_list=['tags'])
+
+
+def normalize_results(results):
+ """
+    This module was originally written for boto v2; make sure that the old
+    return values are maintained and that the shape of the return values is
+    what people expect.
+ """
+
+ routes = [normalize_route_table(route) for route in results['RouteTables']]
+ del results['RouteTables']
+ results = camel_dict_to_snake_dict(results)
+ results['route_tables'] = routes
+ return results
+
+
+def list_ec2_vpc_route_tables(connection, module):
+
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ results = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get route tables")
+
+ results = normalize_results(results)
+ module.exit_json(changed=False, **results)
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default=None, type='dict'),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ list_ec2_vpc_route_tables(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 00000000..5cbd8f24
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+version_added: 1.0.0
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds.
+author:
+ - Robert Estelle (@erydo)
+ - Brad Davidson (@brandond)
+options:
+ az:
+ description:
+ - The availability zone for the subnet.
+ - Required if I(outpost_arn) is set.
+ type: str
+ cidr:
+ description:
+ - The CIDR block for the subnet. E.g. C(192.0.2.0/24).
+ type: str
+ required: true
+ ipv6_cidr:
+ description:
+ - The IPv6 CIDR block for the subnet.
+ - The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range.
+      - Required if I(assign_instances_ipv6=true).
+ type: str
+ outpost_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the Outpost.
+      - If set, the subnet will be created in the specified Outpost.
+ - If I(outpost_arn) is set, I(az) must also be specified.
+ type: str
+ state:
+ description:
+ - Create or remove the subnet.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ vpc_id:
+ description:
+ -"VPC ID of the VPC in which to create or delete the subnet.
+ required: true
+ type: str
+ map_public:
+ description:
+      - Whether instances launched into the subnet should be assigned a public IP address by default.
+ type: bool
+ default: false
+ assign_instances_ipv6:
+ description:
+ - Whether instances launched into the subnet should default to being automatically assigned an IPv6 address.
+ - If I(assign_instances_ipv6=true), I(ipv6_cidr) must also be specified.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Whether to wait for changes to complete.
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+      - Number of seconds to wait for changes to complete.
+ - Ignored unless I(wait=True).
+ default: 300
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ amazon.aws.ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+- name: Create subnet with IPv6 block assigned
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: 2001:db8:0:102::/64
+
+- name: Remove IPv6 block assigned to subnet
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: ''
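+
+# Illustrative only (IDs are placeholders): register the result and reference
+# the new subnet's ID elsewhere in the play.
+- name: Create subnet and capture its ID
+  amazon.aws.ec2_vpc_subnet:
+    state: present
+    vpc_id: vpc-123456
+    cidr: 10.0.1.16/28
+  register: database_subnet
+
+- ansible.builtin.debug:
+    msg: "Created subnet {{ database_subnet.subnet.id }}"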
+'''
+
+RETURN = '''
+subnet:
+ description: Dictionary of subnet values
+ returned: I(state=present)
+ type: complex
+ contains:
+ id:
+ description: Subnet resource id
+ returned: I(state=present)
+ type: str
+ sample: subnet-b883b2c4
+ cidr_block:
+ description: The IPv4 CIDR of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "10.0.0.0/16"
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block actively associated with the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "2001:db8:0:102::/64"
+ availability_zone:
+ description: Availability zone of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: us-east-1a
+ state:
+ description: state of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the Subnet, includes name
+ returned: I(state=present)
+ type: dict
+ sample: {"Name": "My Subnet", "env": "staging"}
+ map_public_ip_on_launch:
+ description: whether public IP is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ assign_ipv6_address_on_creation:
+ description: whether IPv6 address is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ vpc_id:
+ description: the id of the VPC where this Subnet exists
+ returned: I(state=present)
+ type: str
+ sample: vpc-67236184
+ available_ip_address_count:
+ description: number of available IPv4 addresses
+ returned: I(state=present)
+      type: int
+ sample: 251
+ default_for_az:
+ description: indicates whether this is the default Subnet for this Availability Zone
+ returned: I(state=present)
+ type: bool
+ sample: false
+ ipv6_association_id:
+ description: The IPv6 association ID for the currently associated CIDR
+ returned: I(state=present)
+ type: str
+ sample: subnet-cidr-assoc-b85c74d2
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: I(state=present)
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def get_subnet_info(subnet):
+ if 'Subnets' in subnet:
+ return [get_subnet_info(s) for s in subnet['Subnets']]
+ elif 'Subnet' in subnet:
+ subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+ else:
+ subnet = camel_dict_to_snake_dict(subnet)
+
+ if 'tags' in subnet:
+ subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+ else:
+ subnet['tags'] = dict()
+
+ if 'subnet_id' in subnet:
+ subnet['id'] = subnet['subnet_id']
+ del subnet['subnet_id']
+
+ subnet['ipv6_cidr_block'] = ''
+ subnet['ipv6_association_id'] = ''
+ ipv6set = subnet.get('ipv6_cidr_block_association_set')
+ if ipv6set:
+ for item in ipv6set:
+ if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+ subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+ subnet['ipv6_association_id'] = item['association_id']
+
+ return subnet
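+
+# Illustrative only: get_subnet_info() accepts any of the three response shapes
+# produced by the EC2 API calls in this module, e.g.
+#   get_subnet_info({'Subnets': [...]})   -> list of normalized subnet dicts
+#   get_subnet_info({'Subnet': {...}})    -> single normalized subnet dict
+#   get_subnet_info({...})                -> treats the dict itself as a subnet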
+
+
+def waiter_params(module, params, start_time):
+ remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+ params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+ return params
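+
+# Illustrative only (assumed values): with wait_timeout=300 and roughly 60
+# seconds already elapsed, the remaining budget is 240 seconds, so the waiter
+# polls every 5 seconds for at most 240 // 5 == 48 attempts:
+#   {'WaiterConfig': {'Delay': 5, 'MaxAttempts': 48}}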
+
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+ try:
+ get_waiter(conn, waiter_name).wait(
+ **waiter_params(module, params, start_time)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Failed to wait for updates to complete")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None):
+ wait = module.params['wait']
+
+ params = dict(VpcId=vpc_id,
+ CidrBlock=cidr)
+
+ if ipv6_cidr:
+ params['Ipv6CidrBlock'] = ipv6_cidr
+
+ if az:
+ params['AvailabilityZone'] = az
+
+ if outpost_arn:
+ if is_outpost_arn(outpost_arn):
+ params['OutpostArn'] = outpost_arn
+ else:
+            module.fail_json(msg='OutpostArn does not match the pattern specified in API specifications.')
+
+ try:
+ subnet = get_subnet_info(conn.create_subnet(aws_retry=True, **params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subnet")
+
+    # Sometimes AWS takes its time to create a subnet, so using the new
+    # subnet's ID for follow-up calls such as tagging can raise an exception.
+ if wait and subnet.get('state') != 'available':
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+ handle_waiter(conn, module, 'subnet_available', {'SubnetIds': [subnet['id']]}, start_time)
+ subnet['state'] = 'available'
+
+ return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+
+ changed = ensure_ec2_tags(
+ conn, module, subnet['id'],
+ resource_type='subnet',
+ purge_tags=purge_tags,
+ tags=tags,
+ retry_codes=['InvalidSubnetID.NotFound'])
+
+ if module.params['wait'] and not module.check_mode:
+ # Wait for tags to be updated
+ filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ return changed
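+
+# Illustrative only: for tags={'Name': 'db', 'env': 'staging'} the waiter
+# filters built above take the boto3 form
+#   [{'Name': 'tag:Name', 'Values': ['db']}, {'Name': 'tag:env', 'Values': ['staging']}]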
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'],
+ MapPublicIpOnLaunch={'Value': map_public})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'],
+ AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+ if subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+ try:
+ conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet['ipv6_association_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
+ .format(subnet['ipv6_association_id'], subnet['id']))
+
+ # Wait for cidr block to be disassociated
+ if module.params['wait']:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['disassociated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+ wait = module.params['wait']
+ changed = False
+
+ if subnet['ipv6_association_id'] and not ipv6_cidr:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ if ipv6_cidr:
+ filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+ 'vpc-id': subnet['vpc_id']})
+
+ try:
+ _subnets = conn.describe_subnets(aws_retry=True, Filters=filters)
+ check_subnets = get_subnet_info(_subnets)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+ if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+ module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+ if subnet['ipv6_association_id']:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ try:
+ if not check_mode:
+ associate_resp = conn.associate_subnet_cidr_block(aws_retry=True, SubnetId=subnet['id'],
+ Ipv6CidrBlock=ipv6_cidr)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+ else:
+ if not check_mode and wait:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['associated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+            # associate_resp is only set when not running in check mode
+            if not check_mode and associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+ subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+ subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+ if subnet['ipv6_cidr_block_association_set']:
+ subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+ else:
+ subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+ return changed
+
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+ try:
+ _subnets = conn.describe_subnets(aws_retry=True, Filters=filters)
+ subnets = get_subnet_info(_subnets)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+ if subnets:
+ return subnets[0]
+
+ return None
+
+
+def ensure_subnet_present(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ changed = False
+
+ # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
+ start_time = time.time()
+
+ if subnet is None:
+ if not module.check_mode:
+ subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+ ipv6_cidr=module.params['ipv6_cidr'], outpost_arn=module.params['outpost_arn'],
+ az=module.params['az'], start_time=start_time)
+ changed = True
+ # Subnet will be None when check_mode is true
+ if subnet is None:
+ return {
+ 'changed': changed,
+ 'subnet': {}
+ }
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
+ if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
+ changed = True
+
+ if module.params['map_public'] != subnet['map_public_ip_on_launch']:
+ ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['tags'] != subnet['tags']:
+ stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
+ if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
+ changed = True
+
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if not module.check_mode and module.params['wait']:
+ # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
+ # so we only wait for those if necessary just before returning the subnet
+ subnet = ensure_final_subnet(conn, module, subnet, start_time)
+
+ return {
+ 'changed': changed,
+ 'subnet': subnet
+ }
+
+
+def ensure_final_subnet(conn, module, subnet, start_time):
+ for _rewait in range(0, 30):
+ map_public_correct = False
+ assign_ipv6_correct = False
+
+ if module.params['map_public'] == subnet['map_public_ip_on_launch']:
+ map_public_correct = True
+ else:
+ if module.params['map_public']:
+ handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
+ assign_ipv6_correct = True
+ else:
+ if module.params['assign_instances_ipv6']:
+ handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if map_public_correct and assign_ipv6_correct:
+ break
+
+ time.sleep(5)
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+
+ return subnet
+
+
+def ensure_subnet_absent(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if subnet is None:
+ return {'changed': False}
+
+ try:
+ if not module.check_mode:
+ conn.delete_subnet(aws_retry=True, SubnetId=subnet['id'])
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
+ return {'changed': True}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete subnet")
+
+
+def main():
+ argument_spec = dict(
+ az=dict(default=None, required=False),
+ cidr=dict(required=True),
+ ipv6_cidr=dict(default='', required=False),
+ outpost_arn=dict(default='', type='str', required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id=dict(required=True),
+ map_public=dict(default=False, required=False, type='bool'),
+ assign_instances_ipv6=dict(default=False, required=False, type='bool'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300, required=False),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+ if module.params.get('outpost_arn') and not module.params.get('az'):
+ module.fail_json(msg="To specify OutpostArn, you must specify the Availability Zone of the Outpost subnet.")
+
+ if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
+ module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+
+ state = module.params.get('state')
+
+ try:
+ if state == 'present':
+ result = ensure_subnet_present(connection, module)
+ elif state == 'absent':
+ result = ensure_subnet_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py
new file mode 100644
index 00000000..9c25796a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPC subnets in AWS
+description:
+    - Gather information about EC2 VPC subnets in AWS.
+author: "Rob White (@wimnat)"
+options:
+ subnet_ids:
+ description:
+ - A list of subnet IDs to gather information for.
+ aliases: ['subnet_id']
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPC subnets
+- amazon.aws.ec2_vpc_subnet_info:
+
+# Gather information about a particular VPC subnet using ID
+- amazon.aws.ec2_vpc_subnet_info:
+ subnet_ids: subnet-00112233
+
+# Gather information about any VPC subnet with a tag key Name and value Example
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+
+# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
+# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
+# subnet_ids as a list.
+
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+ "tag:Name": "{{ item }}"
+ loop:
+ - publicA
+ - publicB
+ - publicC
+ register: subnet_info
+
+- set_fact:
+ subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
+'''
+
+RETURN = '''
+subnets:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ subnet_id:
+ description: The ID of the Subnet.
+ returned: always
+ type: str
+ id:
+ description: The ID of the Subnet (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+      description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the subnet.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the Subnet.
+ returned: always
+ type: dict
+ map_public_ip_on_launch:
+ description: True/False depending on attribute setting for public IP mapping.
+ returned: always
+ type: bool
+ default_for_az:
+ description: True if this is the default subnet for AZ.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the subnet.
+ returned: always
+ type: str
+ available_ip_address_count:
+ description: Count of available IPs in subnet.
+ returned: always
+      type: int
+ availability_zone:
+ description: The availability zone where the subnet exists.
+ returned: always
+ type: str
+ assign_ipv6_address_on_creation:
+ description: True/False depending on attribute setting for IPv6 address assignment.
+ returned: always
+ type: bool
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(connection, subnet_ids, filters):
+ """
+ Describe Subnets with AWSRetry backoff throttling support.
+
+ connection : boto3 client connection object
+ subnet_ids : list of subnet ids for which to gather information
+ filters : additional filters to apply to request
+ """
+ return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
+
+
+def describe_subnets(connection, module):
+ """
+ Describe Subnets.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ subnet_ids = module.params.get('subnet_ids')
+
+ if subnet_ids is None:
+ # Set subnet_ids to empty list if it is None
+ subnet_ids = []
+
+ # init empty list for return vars
+ subnet_info = list()
+
+ # Get the basic VPC info
+ try:
+ response = describe_subnets_with_backoff(connection, subnet_ids, filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe subnets')
+
+ for subnet in response['Subnets']:
+ # for backwards compatibility
+ subnet['id'] = subnet['SubnetId']
+ subnet_info.append(camel_dict_to_snake_dict(subnet))
+ # convert tag list to ansible dict
+ subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
+
+ module.exit_json(subnets=subnet_info)
+
+
+def main():
+ argument_spec = dict(
+ subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ connection = module.client('ec2')
+
+ describe_subnets(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
new file mode 100644
index 00000000..08d62a7b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
@@ -0,0 +1,828 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_application_lb
+version_added: 5.0.0
+short_description: Manage an Application Load Balancer
+description:
+ - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Rob White (@wimnat)"
+options:
+ access_logs_enabled:
+ description:
+ - Whether or not to enable access logs.
+ - When set, I(access_logs_s3_bucket) must also be set.
+ type: bool
+ access_logs_s3_bucket:
+ description:
+ - The name of the S3 bucket for the access logs.
+ - The bucket must exist in the same
+ region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
+ - Required if access logs in Amazon S3 are enabled.
+ - When set, I(access_logs_enabled) must also be set.
+ type: str
+ access_logs_s3_prefix:
+ description:
+ - The prefix for the log location in the S3 bucket.
+ - If you don't specify a prefix, the access logs are stored in the root of the bucket.
+ - Cannot begin or end with a slash.
+ type: str
+ deletion_protection:
+ description:
+ - Indicates whether deletion protection for the ALB is enabled.
+ - Defaults to C(False).
+ type: bool
+ http2:
+ description:
+ - Indicates whether to enable HTTP2 routing.
+ - Defaults to C(True).
+ type: bool
+ http_desync_mitigation_mode:
+ description:
+ - Determines how the load balancer handles requests that might pose a security risk to an application.
+      - Defaults to C(defensive).
+ type: str
+ choices: ['monitor', 'defensive', 'strictest']
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ http_drop_invalid_header_fields:
+ description:
+ - Indicates whether HTTP headers with invalid header fields are removed by the load balancer C(True) or routed to targets C(False).
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ http_x_amzn_tls_version_and_cipher_suite:
+ description:
+ - Indicates whether the two headers are added to the client request before sending it to the target.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ http_xff_client_port:
+ description:
+ - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ idle_timeout:
+ description:
+ - The number of seconds to wait before an idle connection is closed.
+ type: int
+ listeners:
+ description:
+ - A list of dicts containing listeners to attach to the ALB. See examples for detail of the dict required. Note that listener keys
+ are CamelCased.
+ type: list
+ elements: dict
+ suboptions:
+ Port:
+ description: The port on which the load balancer is listening.
+ required: true
+ type: int
+ Protocol:
+ description: The protocol for connections from clients to the load balancer.
+ required: true
+ type: str
+ Certificates:
+ description: The SSL server certificate.
+ type: list
+ elements: dict
+ suboptions:
+ CertificateArn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ type: str
+ SslPolicy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ type: str
+ DefaultActions:
+ description: The default actions for the listener.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ Type:
+ description: The type of action.
+ type: str
+ TargetGroupArn:
+ description:
+ - The Amazon Resource Name (ARN) of the target group.
+ - Mutually exclusive with I(TargetGroupName).
+ type: str
+ TargetGroupName:
+ description:
+ - The name of the target group.
+              - Mutually exclusive with I(TargetGroupArn).
+            type: str
+ Rules:
+ type: list
+ elements: dict
+ description:
+ - A list of ALB Listener Rules.
+ - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:'
+ - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule'
+ suboptions:
+ Conditions:
+ type: list
+ description: Conditions which must be met for the actions to be applied.
+ elements: dict
+ Priority:
+ type: int
+ description: The rule priority.
+ Actions:
+ type: list
+ description: Actions to apply if all of the rule's conditions are met.
+ elements: dict
+ name:
+ description:
+ - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
+ characters or hyphens, and must not begin or end with a hyphen.
+ required: true
+ type: str
+ purge_listeners:
+ description:
+ - If C(true), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter.
+ - If the I(listeners) parameter is not set then listeners will not be modified.
+ default: true
+ type: bool
+ subnets:
+ description:
+ - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
+ at least two Availability Zones.
+ - Required if I(state=present).
+ type: list
+ elements: str
+ security_groups:
+ description:
+ - A list of the names or IDs of the security groups to assign to the load balancer.
+ - Required if I(state=present).
+ - If C([]), the VPC's default security group will be used.
+ type: list
+ elements: str
+ scheme:
+ description:
+ - Internet-facing or internal load balancer. An ALB scheme can not be modified after creation.
+ default: internet-facing
+ choices: [ 'internet-facing', 'internal' ]
+ type: str
+ state:
+ description:
+ - Create or destroy the load balancer.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ wait:
+ description:
+ - Wait for the load balancer to have a state of 'active' before completing. A status check is
+ performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - The time in seconds to use in conjunction with I(wait).
+ type: int
+ purge_rules:
+ description:
+ - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete.
+ default: true
+ type: bool
+ ip_address_type:
+ description:
+ - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer.
+ choices: [ 'ipv4', 'dualstack' ]
+ type: str
+ waf_fail_open:
+ description:
+      - Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF.
+ - Defaults to C(False).
+ type: bool
+ version_added: 3.2.0
+ version_added_collection: community.aws
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+notes:
+ - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
+ - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ALB and attach a listener
+- amazon.aws.elb_application_lb:
+ name: myalb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+    Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
+# Create an ALB and attach a listener with logging enabled
+- amazon.aws.elb_application_lb:
+ access_logs_enabled: true
+ access_logs_s3_bucket: mybucket
+ access_logs_s3_prefix: "logs"
+ name: myalb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+    Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
+# Create an ALB with listeners and rules
+- amazon.aws.elb_application_lb:
+ name: test-alb
+ subnets:
+ - subnet-12345678
+ - subnet-87654321
+ security_groups:
+ - sg-12345678
+ scheme: internal
+ listeners:
+ - Protocol: HTTPS
+ Port: 443
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: test-target-group
+ Certificates:
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
+ SslPolicy: ELBSecurityPolicy-2015-05
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - '/test'
+ Priority: '1'
+ Actions:
+ - TargetGroupName: test-target-group
+ Type: forward
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/redirect-path/*"
+ Priority: '2'
+ Actions:
+ - Type: redirect
+ RedirectConfig:
+ Host: "#{host}"
+ Path: "/example/redir" # or /#{path}
+ Port: "#{port}"
+ Protocol: "#{protocol}"
+ Query: "#{query}"
+ StatusCode: "HTTP_302" # or HTTP_301
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - "/fixed-response-path/"
+ Priority: '3'
+ Actions:
+ - Type: fixed-response
+ FixedResponseConfig:
+ ContentType: "text/plain"
+ MessageBody: "This is the page you're looking for"
+ StatusCode: "200"
+ - Conditions:
+ - Field: host-header
+ Values:
+ - "hostname.domain.com"
+ - "alternate.domain.com"
+ Priority: '4'
+ Actions:
+ - TargetGroupName: test-target-group
+ Type: forward
+ state: present
+
+# Remove an ALB
+- amazon.aws.elb_application_lb:
+ name: myalb
+ state: absent
+
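+# Illustrative only (IDs are placeholders): create an ALB and wait for it to
+# reach the 'active' state before continuing.
+- amazon.aws.elb_application_lb:
+    name: myalb
+    security_groups:
+      - sg-12345678
+    subnets:
+      - subnet-012345678
+      - subnet-abcdef000
+    wait: true
+    wait_timeout: 600
+    state: present
+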
+'''
+
+RETURN = r'''
+access_logs_s3_bucket:
+ description: The name of the S3 bucket for the access logs.
+ returned: when state is present
+ type: str
+ sample: "mys3bucket"
+access_logs_s3_enabled:
+ description: Indicates whether access logs stored in Amazon S3 are enabled.
+ returned: when state is present
+ type: bool
+ sample: true
+access_logs_s3_prefix:
+ description: The prefix for the location in the S3 bucket.
+ returned: when state is present
+ type: str
+ sample: "my/logs"
+availability_zones:
+ description: The Availability Zones for the load balancer.
+ returned: when state is present
+ type: list
+ sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }]
+canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ returned: when state is present
+ type: str
+ sample: "ABCDEF12345678"
+changed:
+ description: Whether an ALB was created/updated/deleted
+ returned: always
+ type: bool
+ sample: true
+created_time:
+ description: The date and time the load balancer was created.
+ returned: when state is present
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ returned: when state is present
+ type: bool
+ sample: true
+dns_name:
+ description: The public DNS name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: "internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com"
+idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ returned: when state is present
+ type: int
+ sample: 60
+ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ returned: when state is present
+ type: str
+ sample: "ipv4"
+listeners:
+ description: Information about the listeners.
+ returned: when state is present
+ type: complex
+ contains:
+ listener_arn:
+ description: The Amazon Resource Name (ARN) of the listener.
+ returned: when state is present
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: ""
+ port:
+ description: The port on which the load balancer is listening.
+ returned: when state is present
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol for connections from clients to the load balancer.
+ returned: when state is present
+ type: str
+ sample: "HTTPS"
+ certificates:
+ description: The SSL server certificate.
+ returned: when state is present
+ type: complex
+ contains:
+ certificate_arn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ returned: when state is present
+ type: str
+ sample: ""
+ ssl_policy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ returned: when state is present
+ type: str
+ sample: ""
+ default_actions:
+ description: The default actions for the listener.
+ returned: when state is present
+        type: complex
+ contains:
+ type:
+ description: The type of action.
+ returned: when state is present
+ type: str
+ sample: ""
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ returned: when state is present
+ type: str
+ sample: ""
+load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ returned: when state is present
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/001122334455"
+load_balancer_name:
+ description: The name of the load balancer.
+ returned: when state is present
+ type: str
+ sample: "my-alb"
+routing_http2_enabled:
+ description: Indicates whether HTTP/2 is enabled.
+ returned: when state is present
+ type: bool
+ sample: true
+routing_http_desync_mitigation_mode:
+ description: Determines how the load balancer handles requests that might pose a security risk to an application.
+ returned: when state is present
+ type: str
+ sample: "defensive"
+routing_http_drop_invalid_header_fields_enabled:
+ description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false).
+ returned: when state is present
+ type: bool
+ sample: false
+routing_http_x_amzn_tls_version_and_cipher_suite_enabled:
+ description: Indicates whether the two headers are added to the client request before sending it to the target.
+ returned: when state is present
+ type: bool
+ sample: false
+routing_http_xff_client_port_enabled:
+ description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer.
+ returned: when state is present
+ type: bool
+ sample: false
+scheme:
+ description: Internet-facing or internal load balancer.
+ returned: when state is present
+ type: str
+ sample: "internal"
+security_groups:
+ description: The IDs of the security groups for the load balancer.
+ returned: when state is present
+ type: list
+ sample: ['sg-0011223344']
+state:
+ description: The state of the load balancer.
+ returned: when state is present
+ type: dict
+ sample: {'code': 'active'}
+tags:
+ description: The tags attached to the load balancer.
+ returned: when state is present
+ type: dict
+ sample: {
+ 'Tag': 'Example'
+ }
+type:
+ description: The type of load balancer.
+ returned: when state is present
+ type: str
+ sample: "application"
+vpc_id:
+ description: The ID of the VPC for the load balancer.
+ returned: when state is present
+ type: str
+ sample: "vpc-0011223344"
+waf_fail_open_enabled:
+  description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF.
+ returned: when state is present
+ type: bool
+ sample: false
+'''
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import (
+ ApplicationLoadBalancer,
+ ELBListener,
+ ELBListenerRule,
+ ELBListenerRules,
+ ELBListeners,
+)
+from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules
+
+
+@AWSRetry.jittered_backoff()
+def describe_sgs_with_backoff(connection, **params):
+ paginator = connection.get_paginator('describe_security_groups')
+ return paginator.paginate(**params).build_full_result()['SecurityGroups']
+
+
+def find_default_sg(connection, module, vpc_id):
+ """
+ Finds the default security group for the given VPC ID.
+ """
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'})
+ try:
+ sg = describe_sgs_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id))
+ if len(sg) == 1:
+ return sg[0]['GroupId']
+ elif len(sg) == 0:
+ module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id))
+ else:
+ module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id))
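+
+# For illustration (hypothetical VPC ID): ansible_dict_to_boto3_filter_list
+# turns the filter dict above into the boto3 form
+# [{'Name': 'vpc-id', 'Values': ['vpc-0011223344']},
+#  {'Name': 'group-name', 'Values': ['default']}]
+# before it is passed to describe_security_groups.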
+
+
+def create_or_update_alb(alb_obj):
+ """Create ALB or modify main attributes. json_exit here"""
+ if alb_obj.elb:
+ # ALB exists so check subnets, security groups and tags match what has been passed
+ # Subnets
+ if not alb_obj.compare_subnets():
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+ alb_obj.modify_subnets()
+
+ # Security Groups
+ if not alb_obj.compare_security_groups():
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+ alb_obj.modify_security_groups()
+
+ # ALB attributes
+ if not alb_obj.compare_elb_attributes():
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+ alb_obj.update_elb_attributes()
+ alb_obj.modify_elb_attributes()
+
+ # Tags - only need to play with tags if tags parameter has been set to something
+ if alb_obj.tags is not None:
+
+ tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']),
+ boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags)
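+
+ # For illustration (hypothetical values): comparing current tags
+ # {'Tag': 'Example', 'env': 'dev'} with desired tags {'env': 'prod'} and
+ # purge_tags enabled yields tags_need_modify={'env': 'prod'} and
+ # tags_to_delete=['Tag'].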
+
+ # Exit on check_mode
+ if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete):
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+
+ # Delete necessary tags
+ if tags_to_delete:
+ alb_obj.delete_tags(tags_to_delete)
+
+ # Add/update tags
+ if tags_need_modify:
+ alb_obj.modify_tags()
+
+ else:
+ # Create load balancer
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.')
+ alb_obj.create_elb()
+
+ # Listeners
+ listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'])
+ listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
+
+ # Exit on check_mode
+ if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete):
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+
+ # Delete listeners
+ for listener_to_delete in listeners_to_delete:
+ listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn'])
+ listener_obj.delete()
+ listeners_obj.changed = True
+
+ # Add listeners
+ for listener_to_add in listeners_to_add:
+ listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn'])
+ listener_obj.add()
+ listeners_obj.changed = True
+
+ # Modify listeners
+ for listener_to_modify in listeners_to_modify:
+ listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn'])
+ listener_obj.modify()
+ listeners_obj.changed = True
+
+ # If listeners changed, mark ALB as changed
+ if listeners_obj.changed:
+ alb_obj.changed = True
+
+ # Rules of each listener
+ for listener in listeners_obj.listeners:
+ if 'Rules' in listener:
+ rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port'])
+ rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules()
+
+ # Exit on check_mode
+ if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete):
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+
+ # Delete rules
+ if alb_obj.module.params['purge_rules']:
+ for rule in rules_to_delete:
+ rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn)
+ rule_obj.delete()
+ alb_obj.changed = True
+
+ # Add rules
+ for rule in rules_to_add:
+ rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn)
+ rule_obj.create()
+ alb_obj.changed = True
+
+ # Modify rules
+ for rule in rules_to_modify:
+ rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn)
+ rule_obj.modify()
+ alb_obj.changed = True
+
+ # Update ALB ip address type only if option has been provided
+ if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'):
+ # Exit on check_mode
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+
+ alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type'))
+
+ # Exit on check_mode - no changes
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.')
+
+ # Get the ALB again
+ alb_obj.update()
+
+ # Get the ALB listeners again
+ listeners_obj.update()
+
+ # Update the ALB attributes
+ alb_obj.update_elb_attributes()
+
+ # Convert to snake_case and merge in everything we want to return to the user
+ snaked_alb = camel_dict_to_snake_dict(alb_obj.elb)
+ snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes))
+ snaked_alb['listeners'] = []
+ for listener in listeners_obj.current_listeners:
+ # For each listener, get listener rules
+ listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn'])
+ snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener))
+
+ # Change tags to ansible friendly dict
+ snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags'])
+
+ # ip address type
+ snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type()
+
+ alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb)
+
+
+def delete_alb(alb_obj):
+
+ if alb_obj.elb:
+
+ # Exit on check_mode
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.')
+
+ listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'])
+ for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]:
+ listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn'])
+ listener_obj.delete()
+
+ alb_obj.delete()
+
+ else:
+
+ # Exit on check_mode - no changes
+ if alb_obj.module.check_mode:
+ alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.')
+
+ alb_obj.module.exit_json(changed=alb_obj.changed)
+
+
+def main():
+
+ argument_spec = dict(
+ access_logs_enabled=dict(type='bool'),
+ access_logs_s3_bucket=dict(type='str'),
+ access_logs_s3_prefix=dict(type='str'),
+ deletion_protection=dict(type='bool'),
+ http2=dict(type='bool'),
+ http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']),
+ http_drop_invalid_header_fields=dict(type='bool'),
+ http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'),
+ http_xff_client_port=dict(type='bool'),
+ idle_timeout=dict(type='int'),
+ listeners=dict(type='list',
+ elements='dict',
+ options=dict(
+ Protocol=dict(type='str', required=True),
+ Port=dict(type='int', required=True),
+ SslPolicy=dict(type='str'),
+ Certificates=dict(type='list', elements='dict'),
+ DefaultActions=dict(type='list', required=True, elements='dict'),
+ Rules=dict(type='list', elements='dict')
+ )
+ ),
+ name=dict(required=True, type='str'),
+ purge_listeners=dict(default=True, type='bool'),
+ purge_tags=dict(default=True, type='bool'),
+ subnets=dict(type='list', elements='str'),
+ security_groups=dict(type='list', elements='str'),
+ scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ waf_fail_open=dict(type='bool'),
+ wait_timeout=dict(type='int'),
+ wait=dict(default=False, type='bool'),
+ purge_rules=dict(default=True, type='bool'),
+ ip_address_type=dict(type='str', choices=['ipv4', 'dualstack'])
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['subnets', 'security_groups'])
+ ],
+ required_together=[
+ ['access_logs_enabled', 'access_logs_s3_bucket']
+ ],
+ supports_check_mode=True,
+ )
+
+ # Quick check of listeners parameters
+ listeners = module.params.get("listeners")
+ if listeners is not None:
+ for listener in listeners:
+ for key in listener.keys():
+ if key == 'Protocol' and listener[key] == 'HTTPS':
+ if listener.get('SslPolicy') is None:
+ module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")
+
+ if listener.get('Certificates') is None:
+ module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS")
+
+ connection = module.client('elbv2')
+ connection_ec2 = module.client('ec2')
+
+ state = module.params.get("state")
+
+ alb = ApplicationLoadBalancer(connection, connection_ec2, module)
+
+ # If an empty security group list was passed, fall back to the VPC's default security group
+ if alb.elb and module.params.get('security_groups') == []:
+ module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])]
+ alb = ApplicationLoadBalancer(connection, connection_ec2, module)
+
+ if state == 'present':
+ create_or_update_alb(alb)
+ elif state == 'absent':
+ delete_alb(alb)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py
new file mode 100644
index 00000000..42ad25a8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: elb_application_lb_info
+version_added: 5.0.0
+short_description: Gather information about Application Load Balancers in AWS
+description:
+ - Gather information about Application Load Balancers in AWS
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Rob White (@wimnat)
+options:
+ load_balancer_arns:
+ description:
+ - The Amazon Resource Names (ARNs) of the load balancers. You can specify up to 20 load balancers in a single call.
+ required: false
+ type: list
+ elements: str
+ names:
+ description:
+ - The names of the load balancers.
+ required: false
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all ALBs
+ amazon.aws.elb_application_lb_info:
+
+- name: Gather information about a particular ALB given its ARN
+ amazon.aws.elb_application_lb_info:
+ load_balancer_arns:
+ - "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/aabbccddeeff"
+
+- name: Gather information about ALBs named 'alb1' and 'alb2'
+ amazon.aws.elb_application_lb_info:
+ names:
+ - alb1
+ - alb2
+
+- name: Gather information about specific ALB
+ amazon.aws.elb_application_lb_info:
+ names: "alb-name"
+ region: "aws-region"
+ register: alb_info
+- ansible.builtin.debug:
+ var: alb_info
+'''
+
+RETURN = r'''
+load_balancers:
+ description: A list of load balancers.
+ returned: always
+ type: complex
+ contains:
+ access_logs_s3_bucket:
+ description: The name of the S3 bucket for the access logs.
+ type: str
+ sample: "mys3bucket"
+ access_logs_s3_enabled:
+ description: Indicates whether access logs stored in Amazon S3 are enabled.
+ type: bool
+ sample: true
+ access_logs_s3_prefix:
+ description: The prefix for the location in the S3 bucket.
+ type: str
+ sample: "my/logs"
+ availability_zones:
+ description: The Availability Zones for the load balancer.
+ type: list
+ sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }]
+ canonical_hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+ type: str
+ sample: "ABCDEF12345678"
+ created_time:
+ description: The date and time the load balancer was created.
+ type: str
+ sample: "2015-02-12T02:14:02+00:00"
+ deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled.
+ type: bool
+ sample: true
+ dns_name:
+ description: The public DNS name of the load balancer.
+ type: str
+ sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com"
+ idle_timeout_timeout_seconds:
+ description: The idle timeout value, in seconds.
+ type: int
+ sample: 60
+ ip_address_type:
+ description: The type of IP addresses used by the subnets for the load balancer.
+ type: str
+ sample: "ipv4"
+ listeners:
+ description: Information about the listeners.
+ type: complex
+ contains:
+ listener_arn:
+ description: The Amazon Resource Name (ARN) of the listener.
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ type: str
+ sample: ""
+ port:
+ description: The port on which the load balancer is listening.
+ type: int
+ sample: 80
+ protocol:
+ description: The protocol for connections from clients to the load balancer.
+ type: str
+ sample: "HTTPS"
+ certificates:
+ description: The SSL server certificate.
+ type: complex
+ contains:
+ certificate_arn:
+ description: The Amazon Resource Name (ARN) of the certificate.
+ type: str
+ sample: ""
+ ssl_policy:
+ description: The security policy that defines which ciphers and protocols are supported.
+ type: str
+ sample: ""
+ default_actions:
+ description: The default actions for the listener.
+ type: complex
+ contains:
+ type:
+ description: The type of action.
+ type: str
+ sample: ""
+ target_group_arn:
+ description: The Amazon Resource Name (ARN) of the target group.
+ type: str
+ sample: ""
+ load_balancer_arn:
+ description: The Amazon Resource Name (ARN) of the load balancer.
+ type: str
+ sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/001122334455"
+ load_balancer_name:
+ description: The name of the load balancer.
+ type: str
+ sample: "my-alb"
+ routing_http2_enabled:
+ description: Indicates whether HTTP/2 is enabled.
+ type: bool
+ sample: true
+ routing_http_desync_mitigation_mode:
+ description: Determines how the load balancer handles requests that might pose a security risk to an application.
+ type: str
+ sample: "defensive"
+ routing_http_drop_invalid_header_fields_enabled:
+ description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false).
+ type: bool
+ sample: false
+ routing_http_x_amzn_tls_version_and_cipher_suite_enabled:
+ description: Indicates whether the x-amzn-tls-version and x-amzn-tls-cipher-suite headers are added to the client request before sending it to the target.
+ type: bool
+ sample: false
+ routing_http_xff_client_port_enabled:
+ description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer.
+ type: bool
+ sample: false
+ scheme:
+ description: Internet-facing or internal load balancer.
+ type: str
+ sample: "internal"
+ security_groups:
+ description: The IDs of the security groups for the load balancer.
+ type: list
+ sample: ['sg-0011223344']
+ state:
+ description: The state of the load balancer.
+ type: dict
+ sample: {'code': 'active'}
+ tags:
+ description: The tags attached to the load balancer.
+ type: dict
+ sample: {
+ 'Tag': 'Example'
+ }
+ type:
+ description: The type of load balancer.
+ type: str
+ sample: "application"
+ vpc_id:
+ description: The ID of the VPC for the load balancer.
+ type: str
+ sample: "vpc-0011223344"
+ waf_fail_open_enabled:
+ description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets
+ if it is unable to forward the request to AWS WAF.
+ type: bool
+ sample: false
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def get_paginator(connection, **kwargs):
+ paginator = connection.get_paginator('describe_load_balancers')
+ return paginator.paginate(**kwargs).build_full_result()
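+
+# For illustration: build_full_result() walks every page returned by
+# describe_load_balancers and merges them, so callers receive one dict
+# whose 'LoadBalancers' list spans all pages.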
+
+
+def get_alb_listeners(connection, module, alb_arn):
+
+ try:
+ return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe alb listeners")
+
+
+def get_listener_rules(connection, module, listener_arn):
+
+ try:
+ return connection.describe_rules(ListenerArn=listener_arn)['Rules']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe listener rules")
+
+
+def get_load_balancer_attributes(connection, module, load_balancer_arn):
+
+ try:
+ load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer attributes")
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ for k, v in list(load_balancer_attributes.items()):
+ load_balancer_attributes[k.replace('.', '_')] = v
+ del load_balancer_attributes[k]
+
+ return load_balancer_attributes
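+
+# For illustration: the rename above turns boto3 attribute keys such as
+# 'idle_timeout.timeout_seconds' into 'idle_timeout_timeout_seconds',
+# matching the keys documented in RETURN.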
+
+
+def get_load_balancer_tags(connection, module, load_balancer_arn):
+
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer tags")
+
+
+def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn):
+ try:
+ return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe load balancer ip address type")
+
+
+def list_load_balancers(connection, module):
+ load_balancer_arns = module.params.get("load_balancer_arns")
+ names = module.params.get("names")
+
+ try:
+ if not load_balancer_arns and not names:
+ load_balancers = get_paginator(connection)
+ if load_balancer_arns:
+ load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns)
+ if names:
+ load_balancers = get_paginator(connection, Names=names)
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ module.exit_json(load_balancers=[])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list load balancers")
+
+ for load_balancer in load_balancers['LoadBalancers']:
+ # Get the attributes for each alb
+ load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
+
+ # Get the listeners for each alb
+ load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn'])
+
+ # For each listener, get listener rules
+ for listener in load_balancer['listeners']:
+ listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
+
+ # Get ALB ip address type
+ load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn'])
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
+
+ # Get tags for each load balancer
+ for snaked_load_balancer in snaked_load_balancers:
+ snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
+
+ module.exit_json(load_balancers=snaked_load_balancers)
+
+
+def main():
+
+ argument_spec = dict(
+ load_balancer_arns=dict(type='list', elements='str'),
+ names=dict(type='list', elements='str')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['load_balancer_arns', 'names']],
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ list_load_balancers(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
new file mode 100644
index 00000000..5d49d92f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
@@ -0,0 +1,2147 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_classic_lb
+version_added: 1.0.0
+description:
+ - Creates, updates or destroys an Amazon Elastic Load Balancer (ELB).
+ - This module was renamed from C(amazon.aws.ec2_elb_lb) to M(amazon.aws.elb_classic_lb) in version
+ 2.1.0 of the amazon.aws collection.
+short_description: Creates, updates or destroys an Amazon ELB
+author:
+ - "Jim Dalton (@jsdalton)"
+ - "Mark Chappell (@tremble)"
+options:
+ state:
+ description:
+ - Create or destroy the ELB.
+ type: str
+ choices: [ absent, present ]
+ required: true
+ name:
+ description:
+ - The name of the ELB.
+ - The name of an ELB must be at most 32 characters and unique per-region per-account.
+ type: str
+ required: true
+ listeners:
+ description:
+ - List of ports/protocols for this ELB to listen on (see examples).
+ - Required when I(state=present) and the ELB doesn't exist.
+ type: list
+ elements: dict
+ suboptions:
+ load_balancer_port:
+ description:
+ - The port on which the load balancer will listen.
+ type: int
+ required: True
+ instance_port:
+ description:
+ - The port on which the instance is listening.
+ type: int
+ required: True
+ ssl_certificate_id:
+ description:
+ - The Amazon Resource Name (ARN) of the SSL certificate.
+ type: str
+ protocol:
+ description:
+ - The transport protocol to use for routing.
+ - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL).
+ type: str
+ required: True
+ instance_protocol:
+ description:
+ - The protocol to use for routing traffic to instances.
+ - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL).
+ type: str
+ proxy_protocol:
+ description:
+ - Enable proxy protocol for the listener.
+ - Beware, ELB controls for the proxy protocol are based on the
+ I(instance_port). If you have multiple listeners talking to
+ the same I(instance_port), this will affect all of them.
+ type: bool
+ purge_listeners:
+ description:
+ - Purge existing listeners on ELB that are not found in listeners.
+ type: bool
+ default: true
+ instance_ids:
+ description:
+ - List of instance ids to attach to this ELB.
+ type: list
+ elements: str
+ purge_instance_ids:
+ description:
+ - Purge existing instance ids on ELB that are not found in I(instance_ids).
+ type: bool
+ default: false
+ zones:
+ description:
+ - List of availability zones to enable on this ELB.
+ - Mutually exclusive with I(subnets).
+ type: list
+ elements: str
+ purge_zones:
+ description:
+ - Purge existing availability zones on ELB that are not found in I(zones).
+ type: bool
+ default: false
+ security_group_ids:
+ description:
+ - A list of security groups to apply to the ELB.
+ type: list
+ elements: str
+ security_group_names:
+ description:
+ - A list of security group names to apply to the ELB.
+ type: list
+ elements: str
+ health_check:
+ description:
+ - A dictionary of health check configuration settings (see examples).
+ type: dict
+ suboptions:
+ ping_protocol:
+ description:
+ - The protocol which the ELB health check will use when performing a
+ health check.
+ - Valid values are C('HTTP'), C('HTTPS'), C('TCP') and C('SSL').
+ required: true
+ type: str
+ ping_path:
+ description:
+ - The URI path which the ELB health check will query when performing a
+ health check.
+ - Required when I(ping_protocol=HTTP) or I(ping_protocol=HTTPS).
+ required: false
+ type: str
+ ping_port:
+ description:
+ - The TCP port to which the ELB will connect when performing a
+ health check.
+ required: true
+ type: int
+ interval:
+ description:
+ - The approximate interval, in seconds, between health checks of an individual instance.
+ required: true
+ type: int
+ timeout:
+ description:
+ - The amount of time, in seconds, after which no response means a failed health check.
+ aliases: ['response_timeout']
+ required: true
+ type: int
+ unhealthy_threshold:
+ description:
+ - The number of consecutive health check failures required before moving
+ the instance to the Unhealthy state.
+ required: true
+ type: int
+ healthy_threshold:
+ description:
+ - The number of consecutive health checks successes required before moving
+ the instance to the Healthy state.
+ required: true
+ type: int
+ access_logs:
+ description:
+ - A dictionary of access logs configuration settings (see examples).
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - When set to C(True) will configure delivery of access logs to an S3
+ bucket.
+ - When set to C(False) will disable delivery of access logs.
+ required: false
+ type: bool
+ default: true
+ s3_location:
+ description:
+ - The S3 bucket to deliver access logs to.
+ - See U(https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html)
+ for more information about the necessary S3 bucket policies.
+ - Required when I(enabled=True).
+ required: false
+ type: str
+ s3_prefix:
+ description:
+ - Where in the S3 bucket to deliver the logs.
+ - If the prefix is not provided or set to C(""), the log is placed at the root level of the bucket.
+ required: false
+ type: str
+ default: ""
+ interval:
+ description:
+ - The interval for publishing the access logs to S3.
+ required: false
+ type: int
+ default: 60
+ choices: [ 5, 60 ]
+ subnets:
+ description:
+ - A list of VPC subnets to use when creating the ELB.
+ - Mutually exclusive with I(zones).
+ type: list
+ elements: str
+ purge_subnets:
+ description:
+ - Purge existing subnets on the ELB that are not found in I(subnets).
+ - Because it is not permitted to add multiple subnets from the same
+ availability zone, subnets to be purged will be removed before new
+ subnets are added. This may cause a brief outage if you try to replace
+ all subnets at once.
+ type: bool
+ default: false
+ scheme:
+ description:
+ - The scheme to use when creating the ELB.
+ - For a private VPC-visible ELB use C(internal).
+ - If you choose to update your scheme with a different value the ELB will be destroyed and
+ a new ELB created.
+ - Defaults to I(scheme=internet-facing).
+ type: str
+ choices: ["internal", "internet-facing"]
+ connection_draining_timeout:
+ description:
+ - Wait a specified timeout allowing connections to drain before terminating an instance.
+ - Set to C(0) to disable connection draining.
+ type: int
+ idle_timeout:
+ description:
+ - ELB connections from clients and to servers are timed out after this amount of time.
+ type: int
+ cross_az_load_balancing:
+ description:
+ - Distribute load across all configured Availability Zones.
+ - Defaults to C(false).
+ type: bool
+ stickiness:
+ description:
+ - A dictionary of stickiness policy settings.
+ - Policy will be applied to all listeners (see examples).
+ type: dict
+ suboptions:
+ type:
+ description:
+ - The type of stickiness policy to apply.
+ - Required if I(enabled=true).
+ - Ignored if I(enabled=false).
+ required: false
+ type: 'str'
+ choices: ['application','loadbalancer']
+ enabled:
+ description:
+ - When I(enabled=false) session stickiness will be disabled for all listeners.
+ required: false
+ type: bool
+ default: true
+ cookie:
+ description:
+ - The name of the application cookie used for stickiness.
+ - Required if I(enabled=true) and I(type=application).
+ - Ignored if I(enabled=false).
+ required: false
+ type: str
+ expiration:
+ description:
+ - The time period, in seconds, after which the cookie should be considered stale.
+ - If this parameter is not specified, the stickiness session lasts for the duration of the browser session.
+ - Ignored if I(enabled=false).
+ required: false
+ type: int
+ wait:
+ description:
+ - When creating, deleting, or adding instances to an ELB, if I(wait=true)
+ Ansible will wait for both the load balancer and related network interfaces
+ to finish creating/deleting.
+ - Support for waiting when adding instances was added in release 2.1.0.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated.
+ - A maximum of 600 seconds (10 minutes) is allowed.
+ type: int
+ default: 180
+
+notes:
+ - The ec2_elb fact previously set by this module was deprecated in release 2.1.0 and since release
+ 4.0.0 is no longer set.
+ - Support for I(purge_tags) was added in release 2.1.0.
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+# Basic provisioning example (non-VPC)
+
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+ proxy_protocol: True
+ - protocol: https
+ load_balancer_port: 443
+ instance_protocol: http # optional, defaults to value of protocol setting
+ instance_port: 80
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
+
+# Internal ELB example
+
+- amazon.aws.elb_classic_lb:
+ name: "test-vpc"
+ scheme: internal
+ state: present
+ instance_ids:
+ - i-abcd1234
+ purge_instance_ids: true
+ subnets:
+ - subnet-abcd1234
+ - subnet-1a2b3c4d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+
+# Configure a health check and the access logs
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: http # options are http, https, ssl, tcp
+ ping_port: 80
+ ping_path: "/index.html" # not required for tcp or ssl
+ response_timeout: 5 # seconds
+ interval: 30 # seconds
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ access_logs:
+ interval: 5 # minutes (defaults to 60)
+ s3_location: "my-bucket" # This value is required if access_logs is set
+ s3_prefix: "logs"
+
+# Ensure ELB is gone
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+
+# Ensure ELB is gone and wait for check (for default timeout)
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: true
+
+# Ensure ELB is gone and wait for check with timeout value
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: true
+ wait_timeout: 600
+
+# Normally, this module will purge any listeners that exist on the ELB
+# but aren't specified in the listeners parameter. If purge_listeners is
+# false it leaves them alone
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_listeners: false
+
+# Normally, this module will leave availability zones that are enabled
+# on the ELB alone. If purge_zones is true, then any extraneous zones
+# will be removed
+- amazon.aws.elb_classic_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: true
+
+ # Creates an ELB and assigns a list of subnets to it.
+- amazon.aws.elb_classic_lb:
+ state: present
+ name: 'New ELB'
+ security_group_ids: 'sg-123456,sg-67890'
+ subnets: 'subnet-123456,subnet-67890'
+ purge_subnets: true
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+
+# Create an ELB with connection draining, increased idle timeout and cross availability
+# zone load balancing
+- amazon.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ connection_draining_timeout: 60
+ idle_timeout: 300
+ cross_az_load_balancing: "yes"
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+
+# Create an ELB with load balancer stickiness enabled
+- amazon.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: loadbalancer
+ enabled: true
+ expiration: 300
+
+# Create an ELB with application stickiness enabled
+- amazon.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: application
+ enabled: true
+ cookie: SESSIONID
+
+# Create an ELB and add tags
+- amazon.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags:
+ Name: "New ELB"
+ stack: "production"
+ client: "Bob"
+
+# Delete all tags from an ELB
+- amazon.aws.elb_classic_lb:
+ name: "New ELB"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags: {}
+"""
+
+RETURN = '''
+elb:
+ description: Load Balancer attributes
+ returned: always
+ type: dict
+ contains:
+ app_cookie_policy:
+ description: The name of the policy used to control whether the ELB is using an application cookie stickiness policy.
+ type: str
+ sample: ec2-elb-lb-AppCookieStickinessPolicyType
+ returned: when state is not 'absent'
+ backends:
+ description: A description of the backend policy applied to the ELB (instance-port:policy-name).
+ type: str
+ sample: 8181:ProxyProtocol-policy
+ returned: when state is not 'absent'
+ connection_draining_timeout:
+ description: The maximum time, in seconds, to keep the existing connections open before deregistering the instances.
+ type: int
+ sample: 25
+ returned: when state is not 'absent'
+ cross_az_load_balancing:
+ description: Either C('yes') if cross-AZ load balancing is enabled, or C('no') if cross-AZ load balancing is disabled.
+ type: str
+ sample: 'yes'
+ returned: when state is not 'absent'
+ dns_name:
+ description: The DNS name of the ELB.
+ type: str
+ sample: internal-ansible-test-935c585850ac-1516306744.us-east-1.elb.amazonaws.com
+ returned: when state is not 'absent'
+ health_check:
+ description: A dictionary describing the health check used for the ELB.
+ type: dict
+ returned: when state is not 'absent'
+ contains:
+ healthy_threshold:
+ description: The number of consecutive successful health checks before marking an instance as healthy.
+ type: int
+ sample: 2
+ interval:
+ description: The time, in seconds, between each health check.
+ type: int
+ sample: 10
+ target:
+ description: The protocol, port, and (for HTTP(S) health checks) the path tested by the health check.
+ type: str
+ sample: TCP:22
+ timeout:
+ description: The time, in seconds, after which an in-progress health check is considered failed due to a timeout.
+ type: int
+ sample: 5
+ unhealthy_threshold:
+ description: The number of consecutive failed health checks before marking an instance as unhealthy.
+ type: int
+ sample: 2
+ hosted_zone_id:
+ description: The ID of the Amazon Route 53 hosted zone for the load balancer.
+ type: str
+ sample: Z35SXDOTRQ7X7K
+ returned: when state is not 'absent'
+ hosted_zone_name:
+ description: The DNS name of the load balancer when using a custom hostname.
+ type: str
+ sample: 'ansible-module.example'
+ returned: when state is not 'absent'
+ idle_timeout:
+ description: The length of time before an idle connection is dropped by the ELB.
+ type: int
+ sample: 50
+ returned: when state is not 'absent'
+ in_service_count:
+ description: The number of instances attached to the ELB in an in-service state.
+ type: int
+ sample: 1
+ returned: when state is not 'absent'
+ instance_health:
+ description: A list of dictionaries describing the health of each instance attached to the ELB.
+ type: list
+ elements: dict
+ returned: when state is not 'absent'
+ contains:
+ description:
+ description: A human readable description of why the instance is not in service.
+ type: str
+ sample: N/A
+ returned: when state is not 'absent'
+ instance_id:
+ description: The ID of the instance.
+ type: str
+ sample: i-03dcc8953a03d6435
+ returned: when state is not 'absent'
+ reason_code:
+ description: A code describing why the instance is not in service.
+ type: str
+ sample: N/A
+ returned: when state is not 'absent'
+ state:
+ description: The current service state of the instance.
+ type: str
+ sample: InService
+ returned: when state is not 'absent'
+ instances:
+ description: A list of the IDs of instances attached to the ELB.
+ type: list
+ elements: str
+ sample: ['i-03dcc8953a03d6435']
+ returned: when state is not 'absent'
+ lb_cookie_policy:
+ description: The name of the policy used to control whether the ELB is using a cookie stickiness policy.
+ type: str
+ sample: ec2-elb-lb-LBCookieStickinessPolicyType
+ returned: when state is not 'absent'
+ listeners:
+ description:
+ - A list of lists describing the listeners attached to the ELB.
+ - The nested list contains the listener port, the instance port, the listener protocol, the instance protocol,
+ and where appropriate the ID of the SSL certificate for the port.
+ type: list
+ elements: list
+ sample: [[22, 22, 'TCP', 'TCP'], [80, 8181, 'HTTP', 'HTTP']]
+ returned: when state is not 'absent'
+ name:
+ description: The name of the ELB. This name is unique per-region, per-account.
+ type: str
+ sample: ansible-test-935c585850ac
+ returned: when state is not 'absent'
+ out_of_service_count:
+ description: The number of instances attached to the ELB in an out-of-service state.
+ type: int
+ sample: 0
+ returned: when state is not 'absent'
+ proxy_policy:
+ description: The name of the policy used to control whether the ELB operates using the Proxy protocol.
+ type: str
+ sample: ProxyProtocol-policy
+ returned: when the proxy protocol policy exists.
+ region:
+ description: The AWS region in which the ELB is running.
+ type: str
+ sample: us-east-1
+ returned: always
+ scheme:
+ description: Whether the ELB is an C('internal') or a C('internet-facing') load balancer.
+ type: str
+ sample: internal
+ returned: when state is not 'absent'
+ security_group_ids:
+ description: A list of the IDs of the Security Groups attached to the ELB.
+ type: list
+ elements: str
+ sample: ['sg-0c12ebd82f2fb97dc', 'sg-01ec7378d0c7342e6']
+ returned: when state is not 'absent'
+ status:
+ description: A minimal description of the current state of the ELB. Valid values are C('exists'), C('gone'), C('deleted'), C('created').
+ type: str
+ sample: exists
+ returned: always
+ subnets:
+ description: A list of the subnet IDs attached to the ELB.
+ type: list
+ elements: str
+ sample: ['subnet-00d9d0f70c7e5f63c', 'subnet-03fa5253586b2d2d5']
+ returned: when state is not 'absent'
+ tags:
+ description: A dictionary describing the tags attached to the ELB.
+ type: dict
+ sample: {'Name': 'ansible-test-935c585850ac', 'ExampleTag': 'Example Value'}
+ returned: when state is not 'absent'
+ unknown_instance_state_count:
+ description: The number of instances attached to the ELB in an unknown state.
+ type: int
+ sample: 0
+ returned: when state is not 'absent'
+ zones:
+ description: A list of the Availability Zones in which the ELB is running.
+ type: list
+ elements: str
+ sample: ['us-east-1b', 'us-east-1a']
+ returned: when state is not 'absent'
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Taken care of by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+class ElbManager(object):
+ """Handles ELB creation and destruction"""
+
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.listeners = module.params['listeners']
+ self.purge_listeners = module.params['purge_listeners']
+ self.instance_ids = module.params['instance_ids']
+ self.purge_instance_ids = module.params['purge_instance_ids']
+ self.zones = module.params['zones']
+ self.purge_zones = module.params['purge_zones']
+ self.health_check = module.params['health_check']
+ self.access_logs = module.params['access_logs']
+ self.subnets = module.params['subnets']
+ self.purge_subnets = module.params['purge_subnets']
+ self.scheme = module.params['scheme']
+ self.connection_draining_timeout = module.params['connection_draining_timeout']
+ self.idle_timeout = module.params['idle_timeout']
+ self.cross_az_load_balancing = module.params['cross_az_load_balancing']
+ self.stickiness = module.params['stickiness']
+ self.wait = module.params['wait']
+ self.wait_timeout = module.params['wait_timeout']
+ self.tags = module.params['tags']
+ self.purge_tags = module.params['purge_tags']
+
+ self.changed = False
+ self.status = 'gone'
+
+ retry_decorator = AWSRetry.jittered_backoff()
+ self.client = self.module.client('elb', retry_decorator=retry_decorator)
+ self.ec2_client = self.module.client('ec2', retry_decorator=retry_decorator)
+
+ security_group_names = module.params['security_group_names']
+ self.security_group_ids = module.params['security_group_ids']
+
+ self._update_descriptions()
+
+ if security_group_names:
+ # Use the subnets attached to the VPC to find which VPC we're in and
+ # limit the search
+ if self.elb and self.elb.get('Subnets', None):
+ subnets = set(self.elb.get('Subnets') + list(self.subnets or []))
+ else:
+ subnets = set(self.subnets)
+ if subnets:
+ vpc_id = self._get_vpc_from_subnets(subnets)
+ else:
+ vpc_id = None
+ try:
+ self.security_group_ids = self._get_ec2_security_group_ids_from_names(
+ sec_group_list=security_group_names, vpc_id=vpc_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to convert security group names to IDs, try using security group IDs rather than names")
+
+ def _update_descriptions(self):
+ try:
+ self.elb = self._get_elb()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer')
+ try:
+ self.elb_attributes = self._get_elb_attributes()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+ try:
+ self.elb_policies = self._get_elb_policies()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer policies')
+ try:
+ self.elb_health = self._get_elb_instance_health()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer instance health')
+
+ # We have a number of complex parameters which can't be validated by
+ # AnsibleModule or are only required if the ELB doesn't exist.
+ def validate_params(self, state=None):
+ problem_found = False
+ # Validate that protocol is one of the permitted values
+ problem_found |= self._validate_listeners(self.listeners)
+ problem_found |= self._validate_health_check(self.health_check)
+ problem_found |= self._validate_stickiness(self.stickiness)
+ if state == 'present':
+ # When creating a new ELB
+ problem_found |= self._validate_creation_requirements()
+ problem_found |= self._validate_access_logs(self.access_logs)
+
+ # Pass check_mode down through to the module
+ @property
+ def check_mode(self):
+ return self.module.check_mode
+
+ def _get_elb_policies(self):
+ try:
+ attributes = self.client.describe_load_balancer_policies(LoadBalancerName=self.name)
+ except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+ return {}
+ except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except
+ # Be forgiving if we can't see the attributes
+ # Note: This will break idempotency if someone has 'set' but not 'describe' permissions
+ self.module.warn('Access Denied trying to describe load balancer policies')
+ return {}
+ return attributes['PolicyDescriptions']
+
+ def _get_elb_instance_health(self):
+ try:
+ instance_health = self.client.describe_instance_health(LoadBalancerName=self.name)
+ except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+ return []
+ except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except
+ # Be forgiving if we can't see the attributes
+ # Note: This will break idempotency if someone has 'set' but not 'describe' permissions
+ self.module.warn('Access Denied trying to describe instance health')
+ return []
+ return instance_health['InstanceStates']
+
+ def _get_elb_attributes(self):
+ try:
+ attributes = self.client.describe_load_balancer_attributes(LoadBalancerName=self.name)
+ except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+ return {}
+ except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except
+ # Be forgiving if we can't see the attributes
+ # Note: This will break idempotency if someone has 'set' but not 'describe' permissions
+ self.module.warn('Access Denied trying to describe load balancer attributes')
+ return {}
+ return attributes['LoadBalancerAttributes']
+
+ def _get_elb(self):
+ try:
+ elbs = self._describe_loadbalancer(self.name)
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return None
+
+ # Shouldn't happen, but Amazon could change the rules on us...
+ if len(elbs) > 1:
+ self.module.fail_json('Found multiple ELBs with name {0}'.format(self.name))
+
+ self.status = 'exists' if self.status == 'gone' else self.status
+
+ return elbs[0]
+
+ def _delete_elb(self):
+ # True if succeeds, exception raised if not
+ try:
+ if not self.check_mode:
+ self.client.delete_load_balancer(aws_retry=True, LoadBalancerName=self.name)
+ self.changed = True
+ self.status = 'deleted'
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return False
+ return True
+
+ def _create_elb(self):
+ listeners = list(self._format_listener(l) for l in self.listeners)
+ if not self.scheme:
+ self.scheme = 'internet-facing'
+ params = dict(
+ LoadBalancerName=self.name,
+ AvailabilityZones=self.zones,
+ SecurityGroups=self.security_group_ids,
+ Subnets=self.subnets,
+ Listeners=listeners,
+ Scheme=self.scheme)
+ params = scrub_none_parameters(params)
+ if self.tags:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(self.tags)
+
+ if not self.check_mode:
+ self.client.create_load_balancer(aws_retry=True, **params)
+ # create_load_balancer only returns the DNS name
+ self.elb = self._get_elb()
+ self.changed = True
+ self.status = 'created'
+ return True
+
+ def _format_listener(self, listener, inject_protocol=False):
+ """Formats listener into the format needed by the
+ ELB API"""
+
+ listener = scrub_none_parameters(listener)
+
+ for protocol in ['protocol', 'instance_protocol']:
+ if protocol in listener:
+ listener[protocol] = listener[protocol].upper()
+
+ if inject_protocol and 'instance_protocol' not in listener:
+ listener['instance_protocol'] = listener['protocol']
+
+ # Remove proxy_protocol, it has to be handled as a policy
+ listener.pop('proxy_protocol', None)
+
+ ssl_id = listener.pop('ssl_certificate_id', None)
+
+ formatted_listener = snake_dict_to_camel_dict(listener, True)
+ if ssl_id:
+ formatted_listener['SSLCertificateId'] = ssl_id
+
+ return formatted_listener
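+
+ # For illustration (hypothetical values): _format_listener turns the
+ # snake_case listener
+ # {'protocol': 'https', 'load_balancer_port': 443, 'instance_port': 80,
+ #  'ssl_certificate_id': 'arn:aws:iam::123456789012:server-certificate/x'}
+ # into the CamelCase API form
+ # {'Protocol': 'HTTPS', 'LoadBalancerPort': 443, 'InstancePort': 80,
+ #  'SSLCertificateId': 'arn:aws:iam::123456789012:server-certificate/x'}.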
+
+ def _format_healthcheck_target(self):
+ """Compose target string from healthcheck parameters"""
+ protocol = self.health_check['ping_protocol'].upper()
+ path = ""
+
+ if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+ path = self.health_check['ping_path']
+
+ return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+ def _format_healthcheck(self):
+ return dict(
+ Target=self._format_healthcheck_target(),
+ Timeout=self.health_check['timeout'],
+ Interval=self.health_check['interval'],
+ UnhealthyThreshold=self.health_check['unhealthy_threshold'],
+ HealthyThreshold=self.health_check['healthy_threshold'],
+ )
+
+ def ensure_ok(self):
+ """Create the ELB"""
+ if not self.elb:
+ try:
+ self._create_elb()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create load balancer")
+ try:
+ self.elb_attributes = self._get_elb_attributes()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+ self._wait_created()
+
+ # Some attributes are configured on creation, others need to be updated
+ # after creation. Skip updates for those set on creation
+ else:
+ if self._check_scheme():
+ # XXX We should probably set 'None' parameters based on the
+ # current state prior to deletion
+
+ # the only way to change the scheme is by recreating the resource
+ self.ensure_gone()
+ # We need to wait for it to be gone-gone
+ self._wait_gone(True)
+ try:
+ self._create_elb()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to recreate load balancer")
+ try:
+ self.elb_attributes = self._get_elb_attributes()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+ else:
+ self._set_subnets()
+ self._set_zones()
+ self._set_security_groups()
+ self._set_elb_listeners()
+ self._set_tags()
+
+ self._set_health_check()
+ self._set_elb_attributes()
+ self._set_backend_policies()
+ self._set_stickiness_policies()
+ self._set_instance_ids()
+
+# if self._check_attribute_support('access_log'):
+# self._set_access_log()
+
+ def ensure_gone(self):
+ """Destroy the ELB"""
+ if self.elb:
+ try:
+ self._delete_elb()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to delete load balancer")
+ self._wait_gone()
+
+ def _wait_gone(self, wait=None):
+ if not wait and not self.wait:
+ return
+ try:
+ self._wait_for_elb_removed()
+ # Unfortunately even though the ELB itself is removed quickly
+ # the interfaces take longer so reliant security groups cannot
+ # be deleted until the interface has registered as removed.
+ self._wait_for_elb_interface_removed()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed while waiting for load balancer deletion")
+
+ def _wait_created(self, wait=False):
+ if not wait and not self.wait:
+ return
+ try:
+ self._wait_for_elb_created()
+ # The network interfaces can take longer to appear than the ELB itself
+ self._wait_for_elb_interface_created()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed while waiting for load balancer creation")
+
+ def get_load_balancer(self):
+ self._update_descriptions()
+ elb = dict(self.elb or {})
+ if not elb:
+ return {}
+
+ elb['LoadBalancerAttributes'] = self.elb_attributes
+ elb['LoadBalancerPolicies'] = self.elb_policies
+ load_balancer = camel_dict_to_snake_dict(elb)
+ try:
+ load_balancer['tags'] = self._get_tags()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+ return load_balancer
+
+ def get_info(self):
+ self._update_descriptions()
+
+ if not self.elb:
+ return dict(
+ name=self.name,
+ status=self.status,
+ region=self.module.region
+ )
+ check_elb = dict(self.elb)
+ check_elb_attrs = dict(self.elb_attributes or {})
+ check_policies = check_elb.get('Policies', {})
+ try:
+ lb_cookie_policy = check_policies['LBCookieStickinessPolicies'][0]['PolicyName']
+ except (KeyError, IndexError):
+ lb_cookie_policy = None
+ try:
+ app_cookie_policy = check_policies['AppCookieStickinessPolicies'][0]['PolicyName']
+ except (KeyError, IndexError):
+ app_cookie_policy = None
+
+ health_check = camel_dict_to_snake_dict(check_elb.get('HealthCheck', {}))
+
+ backend_policies = list()
+ for port, policies in self._get_backend_policies().items():
+ for policy in policies:
+ backend_policies.append("{0}:{1}".format(port, policy))
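+
+ # For illustration (hypothetical values): instance port 8181 with the
+ # ProxyProtocol-policy attached renders as '8181:ProxyProtocol-policy',
+ # matching the 'backends' sample documented in RETURN.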
+
+ info = dict(
+ name=check_elb.get('LoadBalancerName'),
+ dns_name=check_elb.get('DNSName'),
+ zones=check_elb.get('AvailabilityZones'),
+ security_group_ids=check_elb.get('SecurityGroups'),
+ status=self.status,
+ subnets=check_elb.get('Subnets'),
+ scheme=check_elb.get('Scheme'),
+ hosted_zone_name=check_elb.get('CanonicalHostedZoneName'),
+ hosted_zone_id=check_elb.get('CanonicalHostedZoneNameID'),
+ lb_cookie_policy=lb_cookie_policy,
+ app_cookie_policy=app_cookie_policy,
+ proxy_policy=self._get_proxy_protocol_policy(),
+ backends=backend_policies,
+ instances=self._get_instance_ids(),
+ out_of_service_count=0,
+ in_service_count=0,
+ unknown_instance_state_count=0,
+ region=self.module.region,
+ health_check=health_check,
+ )
+
+ instance_health = camel_dict_to_snake_dict(dict(InstanceHealth=self.elb_health))
+ info.update(instance_health)
+
+ # instance state counts: InService or OutOfService
+ if info['instance_health']:
+ for instance_state in info['instance_health']:
+ if instance_state['state'] == "InService":
+ info['in_service_count'] += 1
+ elif instance_state['state'] == "OutOfService":
+ info['out_of_service_count'] += 1
+ else:
+ info['unknown_instance_state_count'] += 1
+
+ listeners = check_elb.get('ListenerDescriptions', [])
+ if listeners:
+ info['listeners'] = list(
+ self._api_listener_as_tuple(l['Listener']) for l in listeners
+ )
+ else:
+ info['listeners'] = []
+
+ try:
+ info['connection_draining_timeout'] = check_elb_attrs['ConnectionDraining']['Timeout']
+ except KeyError:
+ pass
+ try:
+ info['idle_timeout'] = check_elb_attrs['ConnectionSettings']['IdleTimeout']
+ except KeyError:
+ pass
+ try:
+ is_enabled = check_elb_attrs['CrossZoneLoadBalancing']['Enabled']
+ info['cross_az_load_balancing'] = 'yes' if is_enabled else 'no'
+ except KeyError:
+ pass
+
+ # # return stickiness info?
+
+ try:
+ info['tags'] = self._get_tags()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+ return info
+
+ @property
+ def _waiter_config(self):
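+        # Poll roughly every 10 seconds (or faster for very short timeouts)
+        # and size MaxAttempts so the total wait approximates wait_timeout.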
+ delay = min(10, self.wait_timeout)
+ max_attempts = (self.wait_timeout // delay)
+ return {'Delay': delay, 'MaxAttempts': max_attempts}
+
+ def _wait_for_elb_created(self):
+ if self.check_mode:
+ return True
+
+ waiter = get_waiter(self.client, 'load_balancer_created')
+
+ try:
+ waiter.wait(
+ WaiterConfig=self._waiter_config,
+ LoadBalancerNames=[self.name],
+ )
+ except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB creation')
+
+ return True
+
+ def _wait_for_elb_interface_created(self):
+ if self.check_mode:
+ return True
+ waiter = get_waiter(self.ec2_client, 'network_interface_available')
+
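+        # AWS creates a Classic ELB's network interfaces with the requester-id
+        # 'amazon-elb' and a description of 'ELB <name>', so the interfaces
+        # can be tracked with EC2 filters.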
+ filters = ansible_dict_to_boto3_filter_list(
+ {'requester-id': 'amazon-elb',
+ 'description': 'ELB {0}'.format(self.name)}
+ )
+
+ try:
+ waiter.wait(
+ WaiterConfig=self._waiter_config,
+ Filters=filters,
+ )
+ except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface creation')
+
+ return True
+
+ def _wait_for_elb_removed(self):
+ if self.check_mode:
+ return True
+
+ waiter = get_waiter(self.client, 'load_balancer_deleted')
+
+ try:
+ waiter.wait(
+ WaiterConfig=self._waiter_config,
+ LoadBalancerNames=[self.name],
+ )
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, 'Timeout waiting for ELB removal')
+
+ return True
+
+ def _wait_for_elb_interface_removed(self):
+ if self.check_mode:
+ return True
+
+ waiter = get_waiter(self.ec2_client, 'network_interface_deleted')
+
+ filters = ansible_dict_to_boto3_filter_list(
+ {'requester-id': 'amazon-elb',
+ 'description': 'ELB {0}'.format(self.name)}
+ )
+
+ try:
+ waiter.wait(
+ WaiterConfig=self._waiter_config,
+ Filters=filters,
+ )
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal')
+
+ return True
+
+ def _wait_for_instance_state(self, waiter_name, instances):
+ if not instances:
+ return False
+
+ if self.check_mode:
+ return True
+
+ waiter = get_waiter(self.client, waiter_name)
+
+ instance_list = list(dict(InstanceId=instance) for instance in instances)
+
+ try:
+ waiter.wait(
+ WaiterConfig=self._waiter_config,
+ LoadBalancerName=self.name,
+ Instances=instance_list,
+ )
+ except botocore.exceptions.WaiterError as e:
+ self.module.fail_json_aws(e, 'Timeout waiting for ELB Instance State')
+
+ return True
+
+ def _create_elb_listeners(self, listeners):
+ """Takes a list of listener definitions and creates them"""
+ if not listeners:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ self.client.create_load_balancer_listeners(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ Listeners=listeners,
+ )
+ return True
+
+ def _delete_elb_listeners(self, ports):
+ """Takes a list of listener ports and deletes them from the ELB"""
+ if not ports:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ self.client.delete_load_balancer_listeners(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ LoadBalancerPorts=ports,
+ )
+ return True
+
+ def _set_elb_listeners(self):
+ """
+ Creates listeners specified by self.listeners; overwrites existing
+ listeners on these ports; removes extraneous listeners
+ """
+
+ if not self.listeners:
+ return False
+
+ # We can't use sets here: dicts aren't hashable, so convert to the boto3
+ # format and use a generator to filter
+ new_listeners = list(self._format_listener(l, True) for l in self.listeners)
+ existing_listeners = list(l['Listener'] for l in self.elb['ListenerDescriptions'])
+ listeners_to_remove = list(l for l in existing_listeners if l not in new_listeners)
+ listeners_to_add = list(l for l in new_listeners if l not in existing_listeners)
+
+ changed = False
+
+ if self.purge_listeners:
+ ports_to_remove = list(l['LoadBalancerPort'] for l in listeners_to_remove)
+ else:
+ old_ports = set(l['LoadBalancerPort'] for l in listeners_to_remove)
+ new_ports = set(l['LoadBalancerPort'] for l in listeners_to_add)
+ # If we're not purging, then we need to remove Listeners
+ # where the full definition doesn't match, but the port does
+ ports_to_remove = list(old_ports & new_ports)
+
+ # Update is a delete then add, so do the deletion first
+ try:
+ changed |= self._delete_elb_listeners(ports_to_remove)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to remove listeners from load balancer")
+ try:
+ changed |= self._create_elb_listeners(listeners_to_add)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to remove listeners from load balancer")
+
+ return changed
+
+ def _api_listener_as_tuple(self, listener):
+ """Adds ssl_certificate_id to ELB API tuple if present"""
+ base_tuple = [
+ listener.get('LoadBalancerPort'),
+ listener.get('InstancePort'),
+ listener.get('Protocol'),
+ listener.get('InstanceProtocol'),
+ ]
+ if listener.get('SSLCertificateId', False):
+ base_tuple.append(listener.get('SSLCertificateId'))
+ return tuple(base_tuple)
+
+ def _attach_subnets(self, subnets):
+ if not subnets:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+ self.client.attach_load_balancer_to_subnets(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ Subnets=subnets)
+ return True
+
+ def _detach_subnets(self, subnets):
+ if not subnets:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+ self.client.detach_load_balancer_from_subnets(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ Subnets=subnets)
+ return True
+
+ def _set_subnets(self):
+ """Determine which subnets need to be attached or detached on the ELB"""
+ # Subnets parameter not set, nothing to change
+ if self.subnets is None:
+ return False
+
+ changed = False
+
+ if self.purge_subnets:
+ subnets_to_detach = list(set(self.elb['Subnets']) - set(self.subnets))
+ else:
+ subnets_to_detach = list()
+ subnets_to_attach = list(set(self.subnets) - set(self.elb['Subnets']))
+
+ # You can't add multiple subnets from the same AZ. Remove first, then
+ # add.
+ try:
+ changed |= self._detach_subnets(subnets_to_detach)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to detach subnets from load balancer")
+ try:
+ changed |= self._attach_subnets(subnets_to_attach)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to attach subnets to load balancer")
+
+ return changed
+
+ def _check_scheme(self):
+ """Determine if the current scheme is different than the scheme of the ELB"""
+ if self.scheme:
+ if self.elb['Scheme'] != self.scheme:
+ return True
+ return False
+
+ def _enable_zones(self, zones):
+ if not zones:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.enable_availability_zones_for_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ AvailabilityZones=zones,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg='Failed to enable zones for load balancer')
+ return True
+
+ def _disable_zones(self, zones):
+ if not zones:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.disable_availability_zones_for_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ AvailabilityZones=zones,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg='Failed to disable zones for load balancer')
+ return True
+
+ def _set_zones(self):
+ """Determine which zones need to be enabled or disabled on the ELB"""
+        # zones parameter not set, nothing to change
+ if self.zones is None:
+ return False
+
+ changed = False
+
+ if self.purge_zones:
+ zones_to_disable = list(set(self.elb['AvailabilityZones']) - set(self.zones))
+ else:
+ zones_to_disable = list()
+ zones_to_enable = list(set(self.zones) - set(self.elb['AvailabilityZones']))
+
+ # Add before we remove to reduce the chance of an outage if someone
+ # replaces all zones at once
+ try:
+ changed |= self._enable_zones(zones_to_enable)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to enable zone on load balancer")
+ try:
+ changed |= self._disable_zones(zones_to_disable)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to attach zone to load balancer")
+
+ return changed
+
+ def _set_security_groups(self):
+ if not self.security_group_ids:
+ return False
+        # Security Group Names should already be converted to IDs by this point.
+ if set(self.elb['SecurityGroups']) == set(self.security_group_ids):
+ return False
+
+ self.changed = True
+
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.apply_security_groups_to_load_balancer(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ SecurityGroups=self.security_group_ids,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to apply security groups to load balancer")
+ return True
+
+    def _set_health_check(self):
+        """Set health check values on ELB as needed"""
+        if not self.health_check:
+            return False
+
+ health_check_config = self._format_healthcheck()
+
+ if self.elb and health_check_config == self.elb['HealthCheck']:
+ return False
+
+ self.changed = True
+ if self.check_mode:
+ return True
+ try:
+ self.client.configure_health_check(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ HealthCheck=health_check_config,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to apply healthcheck to load balancer")
+
+ return True
+
+ def _set_elb_attributes(self):
+ attributes = {}
+ if self.cross_az_load_balancing is not None:
+ attr = dict(Enabled=self.cross_az_load_balancing)
+ if not self.elb_attributes.get('CrossZoneLoadBalancing', None) == attr:
+ attributes['CrossZoneLoadBalancing'] = attr
+
+ if self.idle_timeout is not None:
+ attr = dict(IdleTimeout=self.idle_timeout)
+ if not self.elb_attributes.get('ConnectionSettings', None) == attr:
+ attributes['ConnectionSettings'] = attr
+
+ if self.connection_draining_timeout is not None:
+ curr_attr = dict(self.elb_attributes.get('ConnectionDraining', {}))
+ if self.connection_draining_timeout == 0:
+ attr = dict(Enabled=False)
+ curr_attr.pop('Timeout', None)
+ else:
+ attr = dict(Enabled=True, Timeout=self.connection_draining_timeout)
+ if not curr_attr == attr:
+ attributes['ConnectionDraining'] = attr
+
+ if self.access_logs is not None:
+ curr_attr = dict(self.elb_attributes.get('AccessLog', {}))
+ # For disabling we only need to compare and pass 'Enabled'
+ if not self.access_logs.get('enabled'):
+ curr_attr = dict(Enabled=curr_attr.get('Enabled', False))
+ attr = dict(Enabled=self.access_logs.get('enabled'))
+ else:
+ attr = dict(
+ Enabled=True,
+ S3BucketName=self.access_logs['s3_location'],
+ S3BucketPrefix=self.access_logs.get('s3_prefix', ''),
+ EmitInterval=self.access_logs.get('interval', 60),
+ )
+ if not curr_attr == attr:
+ attributes['AccessLog'] = attr
+
+ if not attributes:
+ return False
+
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.modify_load_balancer_attributes(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ LoadBalancerAttributes=attributes
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to apply load balancer attrbutes")
+
+ def _proxy_policy_name(self):
+ return 'ProxyProtocol-policy'
+
+ def _policy_name(self, policy_type):
+ return 'ec2-elb-lb-{0}'.format(policy_type)
+
+ def _get_listener_policies(self):
+ """Get a list of listener policies mapped to the LoadBalancerPort"""
+ if not self.elb:
+ return {}
+ listener_descriptions = self.elb.get('ListenerDescriptions', [])
+ policies = {l['LoadBalancerPort']: l['PolicyNames'] for l in listener_descriptions}
+ return policies
+
+ def _set_listener_policies(self, port, policies):
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.set_load_balancer_policies_of_listener(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ LoadBalancerPort=port,
+ PolicyNames=list(policies),
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to set load balancer listener policies",
+ port=port, policies=policies)
+
+ return True
+
+ def _get_stickiness_policies(self):
+ """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies"""
+ return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] in ['AppCookieStickinessPolicyType', 'LBCookieStickinessPolicyType'])
+
+    def _get_app_stickiness_policy_map(self):
+ """Get a mapping of App Cookie Stickiness policy names to their definitions"""
+ policies = self.elb.get('Policies', {}).get('AppCookieStickinessPolicies', [])
+ return {p['PolicyName']: p for p in policies}
+
+    def _get_lb_stickiness_policy_map(self):
+ """Get a mapping of LB Cookie Stickiness policy names to their definitions"""
+ policies = self.elb.get('Policies', {}).get('LBCookieStickinessPolicies', [])
+ return {p['PolicyName']: p for p in policies}
+
+ def _purge_stickiness_policies(self):
+ """Removes all stickiness policies from all Load Balancers"""
+ # Used when purging stickiness policies or updating a policy (you can't
+ # update a policy while it's connected to a Listener)
+ stickiness_policies = set(self._get_stickiness_policies())
+ listeners = self.elb['ListenerDescriptions']
+ changed = False
+ for listener in listeners:
+ port = listener['Listener']['LoadBalancerPort']
+ policies = set(listener['PolicyNames'])
+ new_policies = set(policies - stickiness_policies)
+ if policies != new_policies:
+ changed |= self._set_listener_policies(port, new_policies)
+
+ return changed
+
+ def _set_stickiness_policies(self):
+ if self.stickiness is None:
+ return False
+
+ # Make sure that the list of policies and listeners is up to date, we're
+ # going to make changes to all listeners
+ self._update_descriptions()
+
+ if not self.stickiness['enabled']:
+ return self._purge_stickiness_policies()
+
+ if self.stickiness['type'] == 'loadbalancer':
+ policy_name = self._policy_name('LBCookieStickinessPolicyType')
+ expiration = self.stickiness.get('expiration')
+ if not expiration:
+ expiration = 0
+ policy_description = dict(
+ PolicyName=policy_name,
+ CookieExpirationPeriod=expiration,
+ )
+            existing_policies = self._get_lb_stickiness_policy_map()
+ add_method = self.client.create_lb_cookie_stickiness_policy
+ elif self.stickiness['type'] == 'application':
+ policy_name = self._policy_name('AppCookieStickinessPolicyType')
+ policy_description = dict(
+ PolicyName=policy_name,
+ CookieName=self.stickiness.get('cookie', 0)
+ )
+            existing_policies = self._get_app_stickiness_policy_map()
+ add_method = self.client.create_app_cookie_stickiness_policy
+ else:
+ # We shouldn't get here...
+ self.module.fail_json(
+ msg='Unknown stickiness policy {0}'.format(
+ self.stickiness['type']
+ )
+ )
+
+ changed = False
+ # To update a policy we need to delete then re-add, and we can only
+ # delete if the policy isn't attached to a listener
+ if policy_name in existing_policies:
+ if existing_policies[policy_name] != policy_description:
+ changed |= self._purge_stickiness_policies()
+
+ if changed:
+ self._update_descriptions()
+
+ changed |= self._set_stickiness_policy(
+ method=add_method,
+ description=policy_description,
+ existing_policies=existing_policies,
+ )
+
+ listeners = self.elb['ListenerDescriptions']
+ for listener in listeners:
+ changed |= self._set_lb_stickiness_policy(
+ listener=listener,
+ policy=policy_name
+ )
+ return changed
+
+ def _delete_loadbalancer_policy(self, policy_name):
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.delete_load_balancer_policy(
+ LoadBalancerName=self.name,
+ PolicyName=policy_name,
+ )
+ except is_boto3_error_code('InvalidConfigurationRequest'):
+ # Already deleted
+ return False
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Failed to load balancer policy {0}".format(policy_name))
+ return True
+
+ def _set_stickiness_policy(self, method, description, existing_policies=None):
+ changed = False
+ if existing_policies:
+ policy_name = description['PolicyName']
+ if policy_name in existing_policies:
+ if existing_policies[policy_name] == description:
+ return False
+ if existing_policies[policy_name] != description:
+ changed |= self._delete_loadbalancer_policy(policy_name)
+
+ self.changed = True
+ changed = True
+
+ if self.check_mode:
+ return changed
+
+ # This needs to be in place for comparisons, but not passed to the
+ # method.
+ if not description.get('CookieExpirationPeriod', None):
+ description.pop('CookieExpirationPeriod', None)
+
+ try:
+ method(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ **description
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create load balancer stickiness policy",
+ description=description)
+ return changed
+
+ def _set_lb_stickiness_policy(self, listener, policy):
+ port = listener['Listener']['LoadBalancerPort']
+ stickiness_policies = set(self._get_stickiness_policies())
+ changed = False
+
+ policies = set(listener['PolicyNames'])
+ new_policies = list(policies - stickiness_policies)
+ new_policies.append(policy)
+
+ if policies != set(new_policies):
+ changed |= self._set_listener_policies(port, new_policies)
+
+ return changed
+
+ def _get_backend_policies(self):
+ """Get a list of backend policies mapped to the InstancePort"""
+ if not self.elb:
+ return {}
+ server_descriptions = self.elb.get('BackendServerDescriptions', [])
+ policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions}
+ return policies
+
+ def _get_proxy_protocol_policy(self):
+ """Returns the name of the name of the ProxyPolicy if created"""
+ all_proxy_policies = self._get_proxy_policies()
+ if not all_proxy_policies:
+ return None
+ if len(all_proxy_policies) == 1:
+ return all_proxy_policies[0]
+ return all_proxy_policies
+
+ def _get_proxy_policies(self):
+ """Get a list of ProxyProtocolPolicyType policies"""
+ return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] == 'ProxyProtocolPolicyType')
+
+ def _get_policy_map(self):
+ """Get a mapping of Policy names to their definitions"""
+ return {p['PolicyName']: p for p in self.elb_policies}
+
+ def _set_backend_policies(self):
+ """Sets policies for all backends"""
+ # Currently only supports setting ProxyProtocol policies
+ if not self.listeners:
+ return False
+
+ backend_policies = self._get_backend_policies()
+ proxy_policies = set(self._get_proxy_policies())
+
+ proxy_ports = dict()
+ for listener in self.listeners:
+ proxy_protocol = listener.get('proxy_protocol', None)
+ # Only look at the listeners for which proxy_protocol is defined
+ if proxy_protocol is None:
+                continue
+ instance_port = listener.get('instance_port')
+ if proxy_ports.get(instance_port, None) is not None:
+ if proxy_ports[instance_port] != proxy_protocol:
+                    self.module.fail_json(
+                        msg='proxy_protocol set to conflicting values for listeners'
+                        ' on port {0}'.format(instance_port))
+ proxy_ports[instance_port] = proxy_protocol
+
+ if not proxy_ports:
+ return False
+
+ changed = False
+
+ # If anyone's set proxy_protocol to true, make sure we have our policy
+ # in place.
+ proxy_policy_name = self._proxy_policy_name()
+ if any(proxy_ports.values()):
+ changed |= self._set_proxy_protocol_policy(proxy_policy_name)
+
+ for port in proxy_ports:
+ current_policies = set(backend_policies.get(port, []))
+ new_policies = list(current_policies - proxy_policies)
+ if proxy_ports[port]:
+ new_policies.append(proxy_policy_name)
+
+ changed |= self._set_backend_policy(port, new_policies)
+
+ return changed
+
+ def _set_backend_policy(self, port, policies):
+ backend_policies = self._get_backend_policies()
+ current_policies = set(backend_policies.get(port, []))
+
+ if current_policies == set(policies):
+ return False
+
+ self.changed = True
+
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.set_load_balancer_policies_for_backend_server(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ InstancePort=port,
+ PolicyNames=policies,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to set load balancer backend policies",
+ port=port, policies=policies)
+
+ return True
+
+ def _set_proxy_protocol_policy(self, policy_name):
+ """Install a proxy protocol policy if needed"""
+ policy_map = self._get_policy_map()
+
+ policy_attributes = [dict(AttributeName='ProxyProtocol', AttributeValue='true')]
+
+ proxy_policy = dict(
+ PolicyName=policy_name,
+ PolicyTypeName='ProxyProtocolPolicyType',
+ PolicyAttributeDescriptions=policy_attributes,
+ )
+
+ existing_policy = policy_map.get(policy_name)
+ if proxy_policy == existing_policy:
+ return False
+
+ if existing_policy is not None:
+ self.module.fail_json(
+ msg="Unable to configure ProxyProtocol policy. "
+ "Policy with name {0} already exists and doesn't match.".format(policy_name),
+ policy=proxy_policy, existing_policy=existing_policy,
+ )
+
+ proxy_policy['PolicyAttributes'] = proxy_policy.pop('PolicyAttributeDescriptions')
+ proxy_policy['LoadBalancerName'] = self.name
+ self.changed = True
+
+ if self.check_mode:
+ return True
+
+ try:
+ self.client.create_load_balancer_policy(
+ aws_retry=True,
+ **proxy_policy
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to create load balancer policy", policy=proxy_policy)
+
+ return True
+
+ def _get_instance_ids(self):
+ """Get the current list of instance ids installed in the elb"""
+ elb = self.elb or {}
+ return list(i['InstanceId'] for i in elb.get('Instances', []))
+
+ def _change_instances(self, method, instances):
+ if not instances:
+ return False
+
+ self.changed = True
+ if self.check_mode:
+ return True
+
+ instance_id_list = list({'InstanceId': i} for i in instances)
+ try:
+ method(
+ aws_retry=True,
+ LoadBalancerName=self.name,
+ Instances=instance_id_list,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to change instance registration",
+ instances=instance_id_list, name=self.name)
+ return True
+
+ def _set_instance_ids(self):
+ """Register or deregister instances from an lb instance"""
+ new_instances = self.instance_ids or []
+ existing_instances = self._get_instance_ids()
+
+ instances_to_add = set(new_instances) - set(existing_instances)
+ if self.purge_instance_ids:
+ instances_to_remove = set(existing_instances) - set(new_instances)
+ else:
+ instances_to_remove = []
+
+ changed = False
+
+ changed |= self._change_instances(self.client.register_instances_with_load_balancer,
+ instances_to_add)
+ if self.wait:
+ self._wait_for_instance_state('instance_in_service', list(instances_to_add))
+ changed |= self._change_instances(self.client.deregister_instances_from_load_balancer,
+ instances_to_remove)
+ if self.wait:
+ self._wait_for_instance_state('instance_deregistered', list(instances_to_remove))
+
+ return changed
+
+ def _get_tags(self):
+ tags = self.client.describe_tags(aws_retry=True,
+ LoadBalancerNames=[self.name])
+ if not tags:
+ return {}
+ try:
+ tags = tags['TagDescriptions'][0]['Tags']
+ except (KeyError, TypeError):
+ return {}
+ return boto3_tag_list_to_ansible_dict(tags)
+
+ def _add_tags(self, tags_to_set):
+ if not tags_to_set:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+ tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
+ self.client.add_tags(LoadBalancerNames=[self.name], Tags=tags_to_add)
+ return True
+
+ def _remove_tags(self, tags_to_unset):
+ if not tags_to_unset:
+ return False
+ self.changed = True
+ if self.check_mode:
+ return True
+ tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
+ self.client.remove_tags(LoadBalancerNames=[self.name], Tags=tags_to_remove)
+ return True
+
+ def _set_tags(self):
+ """Add/Delete tags"""
+ if self.tags is None:
+ return False
+
+ try:
+ current_tags = self._get_tags()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+ tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags,
+ self.purge_tags)
+
+ changed = False
+ try:
+ changed |= self._remove_tags(tags_to_unset)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to remove load balancer tags")
+ try:
+ changed |= self._add_tags(tags_to_set)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Failed to add load balancer tags")
+
+ return changed
+
+ def _validate_stickiness(self, stickiness):
+ problem_found = False
+ if not stickiness:
+ return problem_found
+ if not stickiness['enabled']:
+ return problem_found
+ if stickiness['type'] == 'application':
+ if not stickiness.get('cookie'):
+ problem_found = True
+ self.module.fail_json(
+ msg='cookie must be specified when stickiness type is "application"',
+ stickiness=stickiness,
+ )
+ if stickiness.get('expiration'):
+ self.warn(
+ msg='expiration is ignored when stickiness type is "application"',)
+ if stickiness['type'] == 'loadbalancer':
+ if stickiness.get('cookie'):
+ self.warn(
+ msg='cookie is ignored when stickiness type is "loadbalancer"',)
+ return problem_found
+
+ def _validate_access_logs(self, access_logs):
+ problem_found = False
+ if not access_logs:
+ return problem_found
+ if not access_logs['enabled']:
+ return problem_found
+ if not access_logs.get('s3_location', None):
+ problem_found = True
+ self.module.fail_json(
+                msg='s3_location must be provided when access_logs.enabled is true')
+ return problem_found
+
+ def _validate_creation_requirements(self):
+ if self.elb:
+ return False
+ problem_found = False
+ if not self.subnets and not self.zones:
+ problem_found = True
+ self.module.fail_json(
+ msg='One of subnets or zones must be provided when creating an ELB')
+ if not self.listeners:
+ problem_found = True
+ self.module.fail_json(
+ msg='listeners must be provided when creating an ELB')
+ return problem_found
+
+ def _validate_listeners(self, listeners):
+ if not listeners:
+ return False
+ return any(self._validate_listener(listener) for listener in listeners)
+
+ def _validate_listener(self, listener):
+ problem_found = False
+ if not listener:
+ return problem_found
+ for protocol in ['instance_protocol', 'protocol']:
+ value = listener.get(protocol, None)
+ problem = self._validate_protocol(value)
+ problem_found |= problem
+ if problem:
+ self.module.fail_json(
+ msg='Invalid protocol ({0}) in listener'.format(value),
+ listener=listener)
+ return problem_found
+
+ def _validate_health_check(self, health_check):
+ if not health_check:
+ return False
+ protocol = health_check['ping_protocol']
+ if self._validate_protocol(protocol):
+ self.module.fail_json(
+ msg='Invalid protocol ({0}) defined in health check'.format(protocol),
+ health_check=health_check,)
+ if protocol.upper() in ['HTTP', 'HTTPS']:
+ if not health_check['ping_path']:
+ self.module.fail_json(
+ msg='For HTTP and HTTPS health checks a ping_path must be provided',
+ health_check=health_check,)
+ return False
+
+ def _validate_protocol(self, protocol):
+ if not protocol:
+ return False
+ return protocol.upper() not in ['HTTP', 'HTTPS', 'TCP', 'SSL']
+
+ @AWSRetry.jittered_backoff()
+ def _describe_loadbalancer(self, lb_name):
+ paginator = self.client.get_paginator('describe_load_balancers')
+ return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()['LoadBalancerDescriptions']
+
+ def _get_vpc_from_subnets(self, subnets):
+ if not subnets:
+ return None
+
+ subnet_details = self._describe_subnets(list(subnets))
+ vpc_ids = set(subnet['VpcId'] for subnet in subnet_details)
+
+ if not vpc_ids:
+ return None
+ if len(vpc_ids) > 1:
+ self.module.fail_json("Subnets for an ELB may not span multiple VPCs",
+ subnets=subnet_details, vpc_ids=vpc_ids)
+ return vpc_ids.pop()
+
+ @AWSRetry.jittered_backoff()
+ def _describe_subnets(self, subnet_ids):
+ paginator = self.ec2_client.get_paginator('describe_subnets')
+ return paginator.paginate(SubnetIds=subnet_ids).build_full_result()['Subnets']
+
+ # Wrap it so we get the backoff
+ @AWSRetry.jittered_backoff()
+ def _get_ec2_security_group_ids_from_names(self, **params):
+ return get_ec2_security_group_ids_from_names(ec2_connection=self.ec2_client, **params)
+
+
+def main():
+
+ access_log_spec = dict(
+ enabled=dict(required=False, type='bool', default=True),
+ s3_location=dict(required=False, type='str'),
+ s3_prefix=dict(required=False, type='str', default=""),
+ interval=dict(required=False, type='int', default=60, choices=[5, 60]),
+ )
+
+ stickiness_spec = dict(
+ type=dict(required=False, type='str', choices=['application', 'loadbalancer']),
+ enabled=dict(required=False, type='bool', default=True),
+ cookie=dict(required=False, type='str'),
+ expiration=dict(required=False, type='int')
+ )
+
+ healthcheck_spec = dict(
+ ping_protocol=dict(required=True, type='str'),
+ ping_path=dict(required=False, type='str'),
+ ping_port=dict(required=True, type='int'),
+ interval=dict(required=True, type='int'),
+ timeout=dict(aliases=['response_timeout'], required=True, type='int'),
+ unhealthy_threshold=dict(required=True, type='int'),
+ healthy_threshold=dict(required=True, type='int'),
+ )
+
+ listeners_spec = dict(
+ load_balancer_port=dict(required=True, type='int'),
+ instance_port=dict(required=True, type='int'),
+ ssl_certificate_id=dict(required=False, type='str'),
+ protocol=dict(required=True, type='str'),
+ instance_protocol=dict(required=False, type='str'),
+ proxy_protocol=dict(required=False, type='bool'),
+ )
+
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ listeners=dict(type='list', elements='dict', options=listeners_spec),
+ purge_listeners=dict(default=True, type='bool'),
+ instance_ids=dict(type='list', elements='str'),
+ purge_instance_ids=dict(default=False, type='bool'),
+ zones=dict(type='list', elements='str'),
+ purge_zones=dict(default=False, type='bool'),
+ security_group_ids=dict(type='list', elements='str'),
+ security_group_names=dict(type='list', elements='str'),
+ health_check=dict(type='dict', options=healthcheck_spec),
+ subnets=dict(type='list', elements='str'),
+ purge_subnets=dict(default=False, type='bool'),
+ scheme=dict(choices=['internal', 'internet-facing']),
+ connection_draining_timeout=dict(type='int'),
+ idle_timeout=dict(type='int'),
+ cross_az_load_balancing=dict(type='bool'),
+ stickiness=dict(type='dict', options=stickiness_spec),
+ access_logs=dict(type='dict', options=access_log_spec),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=180, type='int'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, type='bool'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['security_group_ids', 'security_group_names'],
+ ['zones', 'subnets'],
+ ],
+ supports_check_mode=True,
+ )
+
+ wait_timeout = module.params['wait_timeout']
+ state = module.params['state']
+
+ if wait_timeout > 600:
+ module.fail_json(msg='wait_timeout maximum is 600 seconds')
+
+ elb_man = ElbManager(module)
+ elb_man.validate_params(state)
+
+ if state == 'present':
+ elb_man.ensure_ok()
+ # original boto style
+ elb = elb_man.get_info()
+ # boto3 style
+ lb = elb_man.get_load_balancer()
+ ec2_result = dict(elb=elb, load_balancer=lb)
+ elif state == 'absent':
+ elb_man.ensure_gone()
+ # original boto style
+ elb = elb_man.get_info()
+ ec2_result = dict(elb=elb)
+
+ module.exit_json(
+ changed=elb_man.changed,
+ **ec2_result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
new file mode 100644
index 00000000..8eef4030
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy
+version_added: 5.0.0
+short_description: Manage inline IAM policies for users, groups, and roles
+description:
+ - Allows uploading or removing inline IAM policies for IAM users, groups or roles.
+  - To administer managed policies, please see M(community.aws.iam_user), M(community.aws.iam_role),
+    M(community.aws.iam_group) and M(community.aws.iam_managed_policy).
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ iam_type:
+ description:
+ - Type of IAM resource.
+ required: true
+ choices: [ "user", "group", "role"]
+ type: str
+ iam_name:
+ description:
+ - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - The name label for the policy to create or remove.
+ required: true
+ type: str
+ policy_json:
+ description:
+      - A properly JSON-formatted policy document, passed as a string.
+ type: json
+ state:
+ description:
+ - Whether to create or delete the IAM policy.
+ choices: [ "present", "absent"]
+ default: present
+ type: str
+ skip_duplicates:
+ description:
+      - When I(skip_duplicates=true) the module looks for any existing policies that match the document you pass in.
+        If a match is found, the module will not create a new policy object with the same rules.
+ default: false
+ type: bool
+
+author:
+ - "Jonathan I. Davila (@defionscode)"
+ - "Dennis Podkovyrin (@sbj-ss)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Advanced example, create two new groups and add a READ-ONLY policy to both
+# groups.
+- name: Create Two Groups, Mario and Luigi
+ community.aws.iam_group:
+ name: "{{ item }}"
+ state: present
+ loop:
+ - Mario
+ - Luigi
+ register: new_groups
+
+- name: Apply READ-ONLY policy to new groups that have been recently created
+ amazon.aws.iam_policy:
+ iam_type: group
+ iam_name: "{{ item.iam_group.group.group_name }}"
+ policy_name: "READ-ONLY"
+ policy_json: "{{ lookup('template', 'readonly.json.j2') }}"
+ state: present
+ loop: "{{ new_groups.results }}"
+
+# Create a new S3 policy with prefix per user
+- name: Create S3 policy from template
+ amazon.aws.iam_policy:
+ iam_type: user
+ iam_name: "{{ item.user }}"
+ policy_name: "s3_limited_access_{{ item.prefix }}"
+ state: present
+ policy_json: "{{ lookup('template', 's3_policy.json.j2') }}"
+ loop:
+ - user: s3_user
+ prefix: s3_user_prefix
+
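+# Illustrative sketch: with skip_duplicates enabled the module will not
+# create a new inline policy when an existing one already matches the
+# supplied document (the policy name and template here are placeholders).
+- name: Skip creation when an equivalent inline policy already exists
+  amazon.aws.iam_policy:
+    iam_type: user
+    iam_name: s3_user
+    policy_name: s3_limited_access_duplicate
+    policy_json: "{{ lookup('template', 's3_policy.json.j2') }}"
+    skip_duplicates: true
+    state: present
+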
+'''
+RETURN = '''
+policy_names:
+ description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role).
+ returned: always
+ type: list
+ elements: str
+'''
+
+import json
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible.module_utils.six import string_types
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
+
+class PolicyError(Exception):
+ pass
+
+
+class Policy:
+
+ def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode):
+ self.client = client
+ self.name = name
+ self.policy_name = policy_name
+ self.policy_json = policy_json
+ self.skip_duplicates = skip_duplicates
+ self.state = state
+ self.check_mode = check_mode
+ self.changed = False
+
+ self.original_policies = self.get_all_policies().copy()
+ self.updated_policies = {}
+
+ @staticmethod
+ def _iam_type():
+ return ''
+
+ def _list(self, name):
+ return {}
+
+ def list(self):
+ try:
+ return self._list(self.name).get('PolicyNames', [])
+ except is_boto3_error_code('AccessDenied'):
+ return []
+
+ def _get(self, name, policy_name):
+ return '{}'
+
+ def get(self, policy_name):
+ try:
+ return self._get(self.name, policy_name)['PolicyDocument']
+ except is_boto3_error_code('AccessDenied'):
+ return {}
+
+ def _put(self, name, policy_name, policy_doc):
+ pass
+
+ def put(self, policy_doc):
+ self.changed = True
+
+ if self.check_mode:
+ return
+
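+        # Serialize with sort_keys=True so repeated runs upload a
+        # deterministic, byte-identical policy document.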
+ self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True))
+
+ def _delete(self, name, policy_name):
+ pass
+
+ def delete(self):
+ self.updated_policies = self.original_policies.copy()
+
+ if self.policy_name not in self.list():
+ self.changed = False
+ return
+
+ self.changed = True
+ self.updated_policies.pop(self.policy_name, None)
+
+ if self.check_mode:
+ return
+
+ self._delete(self.name, self.policy_name)
+
+ def get_policy_text(self):
+ try:
+ if self.policy_json is not None:
+ return self.get_policy_from_json()
+ except json.JSONDecodeError as e:
+ raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e))
+ return None
+
+ def get_policy_from_json(self):
+ if isinstance(self.policy_json, string_types):
+ pdoc = json.loads(self.policy_json)
+ else:
+ pdoc = self.policy_json
+ return pdoc
+
+ def get_all_policies(self):
+ policies = {}
+ for pol in self.list():
+ policies[pol] = self.get(pol)
+ return policies
+
+ def create(self):
+ matching_policies = []
+ policy_doc = self.get_policy_text()
+ policy_match = False
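+        # compare_policies() returns True when the two documents differ, so a
+        # falsy result means the existing policy already matches policy_doc.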
+ for pol in self.list():
+ if not compare_policies(self.original_policies[pol], policy_doc):
+ matching_policies.append(pol)
+ policy_match = True
+
+ self.updated_policies = self.original_policies.copy()
+
+ if self.policy_name in matching_policies:
+ return
+ if self.skip_duplicates and policy_match:
+ return
+
+ self.put(policy_doc)
+ self.updated_policies[self.policy_name] = policy_doc
+
+ def run(self):
+ if self.state == 'present':
+ self.create()
+ elif self.state == 'absent':
+ self.delete()
+ return {
+ 'changed': self.changed,
+ self._iam_type() + '_name': self.name,
+ 'policies': self.list(),
+ 'policy_names': self.list(),
+ 'diff': dict(
+ before=self.original_policies,
+ after=self.updated_policies,
+ ),
+ }
+
+
+class UserPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'user'
+
+ def _list(self, name):
+ return self.client.list_user_policies(aws_retry=True, UserName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+
+class RolePolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'role'
+
+ def _list(self, name):
+ return self.client.list_role_policies(aws_retry=True, RoleName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+
+class GroupPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'group'
+
+ def _list(self, name):
+ return self.client.list_group_policies(aws_retry=True, GroupName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+ def _put(self, name, policy_name, policy_doc):
+ return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
+
+ def _delete(self, name, policy_name):
+ return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+
+def main():
+ argument_spec = dict(
+ iam_type=dict(required=True, choices=['user', 'group', 'role']),
+ state=dict(default='present', choices=['present', 'absent']),
+ iam_name=dict(required=True),
+ policy_name=dict(required=True),
+ policy_json=dict(type='json', default=None, required=False),
+ skip_duplicates=dict(type='bool', default=False, required=False)
+ )
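+    # The trailing True makes this a "required any of" rule: when
+    # state=present at least one of the listed parameters (here only
+    # policy_json) must be supplied.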
+ required_if = [
+ ('state', 'present', ('policy_json',), True),
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True
+ )
+
+ args = dict(
+ client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+ name=module.params.get('iam_name'),
+ policy_name=module.params.get('policy_name'),
+ policy_json=module.params.get('policy_json'),
+ skip_duplicates=module.params.get('skip_duplicates'),
+ state=module.params.get('state'),
+ check_mode=module.check_mode,
+ )
+ iam_type = module.params.get('iam_type')
+
+ try:
+ if iam_type == 'user':
+ policy = UserPolicy(**args)
+ elif iam_type == 'role':
+ policy = RolePolicy(**args)
+ elif iam_type == 'group':
+ policy = GroupPolicy(**args)
+
+ module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.",
+ date='2024-08-01', collection_name='amazon.aws')
+
+ module.exit_json(**(policy.run()))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+ except PolicyError as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py
new file mode 100644
index 00000000..125f55e1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy_info
+version_added: 5.0.0
+short_description: Retrieve inline IAM policies for users, groups, and roles
+description:
+ - Supports fetching of inline IAM policies for IAM users, groups and roles.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ iam_type:
+ description:
+ - Type of IAM resource you wish to retrieve inline policies for.
+ required: true
+ choices: [ "user", "group", "role"]
+ type: str
+ iam_name:
+ description:
+ - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name.
+ required: true
+ type: str
+ policy_name:
+ description:
+      - Name of a specific IAM inline policy you wish to retrieve.
+ required: false
+ type: str
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Describe all inline IAM policies on an IAM User
+ amazon.aws.iam_policy_info:
+ iam_type: user
+ iam_name: example_user
+
+- name: Describe a specific inline policy on an IAM Role
+ amazon.aws.iam_policy_info:
+ iam_type: role
+ iam_name: example_role
+ policy_name: example_policy
+
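+# Illustrative sketch (names are placeholders): register the result and use
+# the returned policy_names list.
+- name: Fetch all inline policies on a group
+  amazon.aws.iam_policy_info:
+    iam_type: group
+    iam_name: example_group
+  register: group_policies
+
+- name: Show the names of the inline policies found
+  ansible.builtin.debug:
+    var: group_policies.policy_names
+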
+'''
+RETURN = '''
+policies:
+ description: A list containing the matching IAM inline policy names and their data
+ returned: success
+ type: complex
+ contains:
+ policy_name:
+      description: The name of the inline policy
+ returned: success
+ type: str
+ policy_document:
+ description: The JSON document representing the inline IAM policy
+ returned: success
+ type: list
+policy_names:
+ description: A list of matching names of the IAM inline policies on the queried object
+ returned: success
+ type: list
+all_policy_names:
+ description: A list of names of all of the IAM inline policies on the queried object
+ returned: success
+ type: list
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+class Policy:
+
+ def __init__(self, client, name, policy_name):
+ self.client = client
+ self.name = name
+ self.policy_name = policy_name
+ self.changed = False
+
+ @staticmethod
+ def _iam_type():
+ return ''
+
+ def _list(self, name):
+ return {}
+
+ def list(self):
+ return self._list(self.name).get('PolicyNames', [])
+
+ def _get(self, name, policy_name):
+ return '{}'
+
+ def get(self, policy_name):
+ return self._get(self.name, policy_name)['PolicyDocument']
+
+ def get_all(self):
+ policies = list()
+ for policy in self.list():
+ policies.append({"policy_name": policy, "policy_document": self.get(policy)})
+ return policies
+
+ def run(self):
+ policy_list = self.list()
+ ret_val = {
+ 'changed': False,
+ self._iam_type() + '_name': self.name,
+ 'all_policy_names': policy_list
+ }
+ if self.policy_name is None:
+ ret_val.update(policies=self.get_all())
+ ret_val.update(policy_names=policy_list)
+ elif self.policy_name in policy_list:
+ ret_val.update(policies=[{
+ "policy_name": self.policy_name,
+ "policy_document": self.get(self.policy_name)}])
+ ret_val.update(policy_names=[self.policy_name])
+ return ret_val
+
+
+class UserPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'user'
+
+ def _list(self, name):
+ return self.client.list_user_policies(aws_retry=True, UserName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name)
+
+
+class RolePolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'role'
+
+ def _list(self, name):
+ return self.client.list_role_policies(aws_retry=True, RoleName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name)
+
+
+class GroupPolicy(Policy):
+
+ @staticmethod
+ def _iam_type():
+ return 'group'
+
+ def _list(self, name):
+ return self.client.list_group_policies(aws_retry=True, GroupName=name)
+
+ def _get(self, name, policy_name):
+ return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name)
+
+
+def main():
+ argument_spec = dict(
+ iam_type=dict(required=True, choices=['user', 'group', 'role']),
+ iam_name=dict(required=True),
+ policy_name=dict(default=None, required=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ args = dict(
+ client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+ name=module.params.get('iam_name'),
+ policy_name=module.params.get('policy_name'),
+ )
+ iam_type = module.params.get('iam_type')
+
+ try:
+ if iam_type == 'user':
+ policy = UserPolicy(**args)
+ elif iam_type == 'role':
+ policy = RolePolicy(**args)
+ elif iam_type == 'group':
+ policy = GroupPolicy(**args)
+
+ module.exit_json(**(policy.run()))
+ except is_boto3_error_code('NoSuchEntity') as e:
+ module.exit_json(changed=False, msg=e.response['Error']['Message'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user.py b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
new file mode 100644
index 00000000..af5ea803
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
@@ -0,0 +1,580 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_user
+version_added: 5.0.0
+short_description: Manage AWS IAM users
+description:
+ - A module to manage AWS IAM users.
+  - The module does not manage groups that users belong to; group memberships can be managed using M(community.aws.iam_group).
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Josh Souza (@joshsouza)
+options:
+ name:
+ description:
+ - The name of the user to create.
+ required: true
+ type: str
+ password:
+ description:
+ - The password to apply to the user.
+ required: false
+ type: str
+ version_added: 2.2.0
+ version_added_collection: community.aws
+ password_reset_required:
+ description:
+ - Defines if the user is required to set a new password after login.
+ required: false
+ type: bool
+ default: false
+ version_added: 3.1.0
+ version_added_collection: community.aws
+ update_password:
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - When to update user passwords.
+ - I(update_password=always) will ensure the password is set to I(password).
+ - I(update_password=on_create) will only set the password for newly created users.
+ type: str
+ version_added: 2.2.0
+ version_added_collection: community.aws
+ remove_password:
+ description:
+ - Option to delete user login passwords.
+ - This field is mutually exclusive to I(password).
+ type: 'bool'
+ version_added: 2.2.0
+ version_added_collection: community.aws
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or friendly names to attach to the user.
+ - To embed an inline policy, use M(community.aws.iam_policy).
+ required: false
+ type: list
+ elements: str
+ aliases: ['managed_policy']
+ state:
+ description:
+ - Create or remove the IAM user.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ purge_policies:
+ description:
+ - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ required: false
+ default: false
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ wait:
+ description:
+ - When I(wait=True) the module will wait for up to I(wait_timeout) seconds
+ for IAM user creation before returning.
+ default: True
+ type: bool
+ version_added: 2.2.0
+ version_added_collection: community.aws
+ wait_timeout:
+ description:
+ - How long (in seconds) to wait for creation / updates to complete.
+ default: 120
+ type: int
+ version_added: 2.2.0
+ version_added_collection: community.aws
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Note: This module does not allow management of groups that users belong to.
+# Group memberships should be managed from the group side using
+# community.aws.iam_group.
+
+- name: Create a user
+ amazon.aws.iam_user:
+ name: testuser1
+ state: present
+
+- name: Create a user with a password
+ amazon.aws.iam_user:
+ name: testuser1
+ password: SomeSecurePassword
+ state: present
+
+- name: Create a user and attach a managed policy using its ARN
+ amazon.aws.iam_user:
+ name: testuser1
+ managed_policies:
+ - arn:aws:iam::aws:policy/AmazonSNSFullAccess
+ state: present
+
+- name: Remove all managed policies from an existing user with an empty list
+ amazon.aws.iam_user:
+ name: testuser1
+ state: present
+ purge_policies: true
+
+- name: Create user with tags
+ amazon.aws.iam_user:
+ name: testuser1
+ state: present
+ tags:
+ Env: Prod
+
+- name: Delete the user
+ amazon.aws.iam_user:
+ name: testuser1
+ state: absent
+
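+# Illustrative sketch: with update_password set to on_create an existing
+# user's password is left unchanged on subsequent runs.
+- name: Ensure a user exists without rotating an existing password
+  amazon.aws.iam_user:
+    name: testuser1
+    password: SomeSecurePassword
+    update_password: on_create
+    state: present
+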
+'''
+RETURN = r'''
+user:
+ description: dictionary containing all the user information
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the user
+ type: str
+ sample: "arn:aws:iam::123456789012:user/testuser1"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the user was created
+ type: str
+ sample: "2017-02-08T04:36:28+00:00"
+ user_id:
+ description: the stable and unique string identifying the user
+ type: str
+ sample: "AGPA12345EXAMPLE54321"
+ user_name:
+ description: the friendly name that identifies the user
+ type: str
+ sample: "testuser1"
+ path:
+ description: the path to the user
+ type: str
+ sample: "/"
+ tags:
+ description: user tags
+ type: dict
+ returned: always
+ sample: {"Env": "Prod"}
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+def compare_attached_policies(current_attached_policies, new_attached_policies):
+
+    # If new_attached_policies is None it means we want to remove all policies
+    if new_attached_policies is None:
+        return not current_attached_policies
+
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
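+    # An empty symmetric difference means the attached policy ARNs already
+    # match the requested ones exactly.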
+ if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
+ return True
+ else:
+ return False
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+
+ # List comprehension that looks for any policy in the 'policy_names' list
+ # that does not begin with 'arn'. If there aren't any, short circuit.
+ # If there are, translate friendly name to the full arn
+ if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None):
+ return policy_names
+ allpolicies = {}
+ paginator = connection.get_paginator('list_policies')
+ policies = paginator.paginate().build_full_result()['Policies']
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json(msg="Couldn't find policy: " + str(e))
+
+
+def wait_iam_exists(connection, module):
+
+ user_name = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+
+ delay = min(wait_timeout, 5)
+ max_attempts = wait_timeout // delay
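+    # e.g. the default wait_timeout of 120 gives delay=5 and max_attempts=24,
+    # i.e. the waiter polls every 5 seconds for up to two minutes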
+
+ try:
+ waiter = connection.get_waiter('user_exists')
+ waiter.wait(
+ WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
+ UserName=user_name,
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed while waiting on IAM user creation')
+
+
+def create_or_update_login_profile(connection, module):
+
+ # Apply new password / update password for the user
+ user_params = dict()
+ user_params['UserName'] = module.params.get('name')
+ user_params['Password'] = module.params.get('password')
+ user_params['PasswordResetRequired'] = module.params.get('password_reset_required')
+ retval = {}
+
+ try:
+ retval = connection.update_login_profile(**user_params)
+ except is_boto3_error_code('NoSuchEntity'):
+ # Login profile does not yet exist - create it
+ try:
+ retval = connection.create_login_profile(**user_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create user login profile")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to update user login profile")
+
+ return True, retval
+
+
+def delete_login_profile(connection, module):
+ '''
+    Deletes a user's login profile.
+ Parameters:
+ connection: IAM client
+ module: AWSModule
+ Returns:
+ (bool): True if login profile deleted, False if no login profile found to delete
+ '''
+ user_params = dict()
+ user_params['UserName'] = module.params.get('name')
+
+ # User does not have login profile - nothing to delete
+ if not user_has_login_profile(connection, module, user_params['UserName']):
+ return False
+
+ if not module.check_mode:
+ try:
+ connection.delete_login_profile(**user_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to delete user login profile")
+
+ return True
+
+
+def create_or_update_user(connection, module):
+
+ params = dict()
+ params['UserName'] = module.params.get('name')
+ managed_policies = module.params.get('managed_policies')
+ purge_policies = module.params.get('purge_policies')
+
+ if module.params.get('tags') is not None:
+ params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+
+ changed = False
+
+ if managed_policies:
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ # Get user
+ user = get_user(connection, module, params['UserName'])
+
+ # If user is None, create it
+ new_login_profile = False
+ if user is None:
+ # Check mode means we would create the user
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ connection.create_user(**params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create user")
+
+ # Wait for user to be fully available before continuing
+ if module.params.get('wait'):
+ wait_iam_exists(connection, module)
+
+ if module.params.get('password') is not None:
+ login_profile_result, login_profile_data = create_or_update_login_profile(connection, module)
+
+ if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False):
+ new_login_profile = True
+ else:
+ login_profile_result = None
+ update_result = update_user_tags(connection, module, params, user)
+
+ if module.params['update_password'] == "always" and module.params.get('password') is not None:
+ # Can't compare passwords, so just return changed on check mode runs
+ if module.check_mode:
+ module.exit_json(changed=True)
+ login_profile_result, login_profile_data = create_or_update_login_profile(connection, module)
+
+ if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False):
+ new_login_profile = True
+
+ elif module.params.get('remove_password'):
+ login_profile_result = delete_login_profile(connection, module)
+
+ changed = bool(update_result) or bool(login_profile_result)
+
+ # Manage managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
+ if not compare_attached_policies(current_attached_policies, managed_policies):
+ current_attached_policies_arn_list = []
+ for policy in current_attached_policies:
+ current_attached_policies_arn_list.append(policy['PolicyArn'])
+
+        # An empty managed_policies list combined with purge_policies
+        # removes every attached policy
+        if purge_policies:
+            # Detach any currently attached policy not in the desired list
+ for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format(
+ policy_arn, params['UserName']))
+
+ # If there are policies to adjust that aren't in the current list, then things have changed
+ # Otherwise the only changes were in purging above
+ if set(managed_policies).difference(set(current_attached_policies_arn_list)):
+ changed = True
+ # If there are policies in managed_policies attach each policy
+ if managed_policies != [None] and not module.check_mode:
+ for policy_arn in managed_policies:
+ try:
+ connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format(
+ policy_arn, params['UserName']))
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ # Get the user again
+ user = get_user(connection, module, params['UserName'])
+ if changed and new_login_profile:
+        # `LoginProfile` is only returned by the create_login_profile call
+ user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False)
+
+ module.exit_json(changed=changed, iam_user=user, user=user['user'])
+
+
+def destroy_user(connection, module):
+
+ user_name = module.params.get('name')
+
+ user = get_user(connection, module, user_name)
+ # User is not present
+ if not user:
+ module.exit_json(changed=False)
+
+ # Check mode means we would remove this user
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Remove any attached policies otherwise deletion fails
+ try:
+ for policy in get_attached_policy_list(connection, module, user_name):
+ connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
+
+ try:
+ # Remove user's access keys
+ access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"]
+ for access_key in access_keys:
+ connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"])
+
+ # Remove user's login profile (console password)
+ delete_login_profile(connection, module)
+
+ # Remove user's ssh public keys
+ ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"]
+ for ssh_public_key in ssh_public_keys:
+ connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"])
+
+ # Remove user's service specific credentials
+ service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"]
+ for service_specific_credential in service_credentials:
+ connection.delete_service_specific_credential(
+ UserName=user_name,
+ ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"]
+ )
+
+ # Remove user's signing certificates
+ signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"]
+ for signing_certificate in signing_certificates:
+ connection.delete_signing_certificate(
+ UserName=user_name,
+ CertificateId=signing_certificate["CertificateId"]
+ )
+
+ # Remove user's MFA devices
+ mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"]
+ for mfa_device in mfa_devices:
+ connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"])
+
+ # Remove user's inline policies
+ inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"]
+ for policy_name in inline_policies:
+ connection.delete_user_policy(UserName=user_name, PolicyName=policy_name)
+
+ # Remove user's group membership
+ user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"]
+ for group in user_groups:
+ connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"])
+
+ connection.delete_user(UserName=user_name)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
+
+ module.exit_json(changed=True)
+
+
+def get_user(connection, module, name):
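+    '''
+    Fetch an IAM user, returning None when the user does not exist. The
+    result is snake_cased, with the boto3 tag list converted to a dict under
+    user['user']['tags'].
+    '''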
+
+ params = dict()
+ params['UserName'] = name
+
+ try:
+ user = connection.get_user(**params)
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to get user {0}".format(name))
+
+ tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', []))
+ user = camel_dict_to_snake_dict(user)
+ user['user']['tags'] = tags
+ return user
+
+
+def get_attached_policy_list(connection, module, name):
+
+ try:
+ return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
+ except is_boto3_error_code('NoSuchEntity'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
+
+
+def user_has_login_profile(connection, module, name):
+ '''
+    Returns whether or not the given user has a login profile.
+    Parameters:
+        connection: IAM client
+        module: AWSModule
+        name (str): Username of user
+    Returns:
+        (bool): True if user has a login profile, False if not
+ '''
+ try:
+ connection.get_login_profile(UserName=name)
+ except is_boto3_error_code('NoSuchEntity'):
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name))
+ return True
+
+
+def update_user_tags(connection, module, params, user):
+ user_name = params['UserName']
+ existing_tags = user['user']['tags']
+ new_tags = params.get('Tags')
+ if new_tags is None:
+ return False
+ new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+
+ purge_tags = module.params.get('purge_tags')
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
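+    # e.g. existing {'Env': 'Prod'} vs new {'Env': 'Dev', 'Team': 'X'} with
+    # purge_tags=True yields tags_to_add={'Env': 'Dev', 'Team': 'X'} and
+    # tags_to_remove=[] (changed values count as additions, not removals)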
+
+ if not module.check_mode:
+ try:
+ if tags_to_remove:
+ connection.untag_user(UserName=user_name, TagKeys=tags_to_remove)
+ if tags_to_add:
+ connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for user %s' % user_name)
+
+ changed = bool(tags_to_add) or bool(tags_to_remove)
+ return changed
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ password=dict(type='str', no_log=True),
+ password_reset_required=dict(type='bool', default=False, no_log=False),
+ update_password=dict(default='always', choices=['always', 'on_create'], no_log=False),
+ remove_password=dict(type='bool', no_log=False),
+ managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=120, type='int'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'remove_password']],
+ )
+
+ module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.",
+ date='2024-05-01', collection_name='amazon.aws')
+
+ connection = module.client('iam')
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_user(connection, module)
+ else:
+ destroy_user(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
new file mode 100644
index 00000000..e9c95edc
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_user_info
+version_added: 5.0.0
+short_description: Gather information about IAM users in AWS
+description:
+  - This module can be used to gather information about IAM users in AWS.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Constantin Bugneac (@Constantin07)
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ name:
+ description:
+ - The name of the IAM user to look for.
+ required: false
+ type: str
+ group:
+ description:
+      - The name of the IAM group whose users to look for. Mutually exclusive with C(path).
+ required: false
+ type: str
+ path:
+ description:
+ - The path to the IAM user. Mutually exclusive with C(group).
+      - If specified, returns all users whose path starts with the provided value.
+ required: false
+ default: '/'
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Gather facts about "test" user.
+- name: Get IAM user info
+ amazon.aws.iam_user_info:
+ name: "test"
+
+# Gather facts about all users in the "dev" group.
+- name: Get IAM user info
+ amazon.aws.iam_user_info:
+ group: "dev"
+
+# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
+- name: Get IAM user info
+ amazon.aws.iam_user_info:
+ path: "/division_abc/subdivision_xyz/"
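+
+# The name filter can be combined with group or path to narrow the result.
+- name: Get IAM user info for one user within a group
+  amazon.aws.iam_user_info:
+    group: "dev"
+    name: "test"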
+'''
+
+RETURN = r'''
+iam_users:
+  description: list of matching IAM users
+ returned: success
+ type: complex
+ contains:
+ arn:
+ description: the ARN of the user
+ returned: if user exists
+ type: str
+ sample: "arn:aws:iam::123456789012:user/dev/test_user"
+ create_date:
+ description: the datetime user was created
+ returned: if user exists
+ type: str
+ sample: "2016-05-24T12:24:59+00:00"
+ password_last_used:
+ description: the last datetime the password was used by user
+ returned: if password was used at least once
+ type: str
+ sample: "2016-05-25T13:39:11+00:00"
+ path:
+ description: the path to user
+ returned: if user exists
+ type: str
+ sample: "/dev/"
+ user_id:
+ description: the unique user id
+ returned: if user exists
+ type: str
+      sample: "AIDAIOOCQKTUGI6QJLGH2"
+ user_name:
+ description: the user name
+ returned: if user exists
+ type: str
+ sample: "test_user"
+ tags:
+ description: User tags.
+ type: dict
+ returned: if user exists
+ sample: '{"Env": "Prod"}'
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_users_with_backoff(client, operation, **kwargs):
+ paginator = client.get_paginator(operation)
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+def describe_iam_user(user):
+ tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', []))
+ user = camel_dict_to_snake_dict(user)
+ user['tags'] = tags
+ return user
+
+
+def list_iam_users(connection, module):
+
+ name = module.params.get('name')
+ group = module.params.get('group')
+ path = module.params.get('path')
+
+ params = dict()
+ iam_users = []
+
+ if not group and not path:
+ if name:
+ params['UserName'] = name
+ try:
+ iam_users.append(connection.get_user(**params)['User'])
+ except is_boto3_error_code('NoSuchEntity'):
+ pass
+ except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
+
+ if group:
+ params['GroupName'] = group
+ try:
+ iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
+ except is_boto3_error_code('NoSuchEntity'):
+ pass
+ except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
+ if name:
+ iam_users = [user for user in iam_users if user['UserName'] == name]
+
+ if path and not group:
+ params['PathPrefix'] = path
+ try:
+ iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
+ except is_boto3_error_code('NoSuchEntity'):
+ pass
+ except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
+ if name:
+ iam_users = [user for user in iam_users if user['UserName'] == name]
+
+ module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users])
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group=dict(),
+ path=dict(default='/')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['group', 'path']
+ ],
+ supports_check_mode=True
+ )
+
+ connection = module.client('iam')
+
+ list_iam_users(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
new file mode 100644
index 00000000..a1cf5366
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
@@ -0,0 +1,1000 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: kms_key
+version_added: 5.0.0
+short_description: Perform various KMS key management tasks
+description:
+ - Manage role/user access to a KMS key.
+ - Not designed for encrypting/decrypting.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_kms).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ alias:
+ description:
+ - An alias for a key.
+ - For safety, even though KMS does not require keys to have an alias, this module expects all
+ new keys to be given an alias to make them easier to manage. Existing keys without an alias
+ may be referred to by I(key_id). Use M(amazon.aws.kms_key_info) to find key ids.
+      - Note that passing a I(key_id) and I(alias) will only cause a new alias to be added; an alias will never be renamed.
+ - The C(alias/) prefix is optional.
+ - Required if I(key_id) is not given.
+ required: false
+ aliases:
+ - key_alias
+ type: str
+ key_id:
+ description:
+ - Key ID or ARN of the key.
+      - One of I(alias) or I(key_id) is required.
+ required: false
+ aliases:
+ - key_arn
+ type: str
+ enable_key_rotation:
+ description:
+ - Whether the key should be automatically rotated every year.
+ required: false
+ type: bool
+ state:
+ description:
+ - Whether a key should be present or absent.
+ - Note that making an existing key C(absent) only schedules a key for deletion.
+ - Passing a key that is scheduled for deletion with I(state=present) will cancel key deletion.
+ required: False
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ enabled:
+ description: Whether or not a key is enabled.
+ default: True
+ type: bool
+ description:
+ description:
+ - A description of the CMK.
+ - Use a description that helps you decide whether the CMK is appropriate for a task.
+ type: str
+ pending_window:
+ description:
+ - The number of days between requesting deletion of the CMK and when it will actually be deleted.
+ - Only used when I(state=absent) and the CMK has not yet been deleted.
+ - Valid values are between 7 and 30 (inclusive).
+ - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)'
+ type: int
+ aliases: ['deletion_delay']
+ version_added: 1.4.0
+ version_added_collection: community.aws
+ purge_grants:
+ description:
+ - Whether the I(grants) argument should cause grants not in the list to be removed.
+ default: False
+ type: bool
+ grants:
+ description:
+ - A list of grants to apply to the key. Each item must contain I(grantee_principal).
+ Each item can optionally contain I(retiring_principal), I(operations), I(constraints),
+ I(name).
+      - I(grantee_principal) and I(retiring_principal) must be ARNs.
+ - 'For full documentation of suboptions see the boto3 documentation:'
+ - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)'
+ type: list
+ elements: dict
+ suboptions:
+ grantee_principal:
+ description: The full ARN of the principal being granted permissions.
+ required: true
+ type: str
+ retiring_principal:
+ description: The full ARN of the principal permitted to revoke/retire the grant.
+ type: str
+ operations:
+ type: list
+ elements: str
+ description:
+ - A list of operations that the grantee may perform using the CMK.
+ choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo',
+ 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign']
+ constraints:
+ description:
+ - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals),
+ either or both being a dict specifying an encryption context match.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or
+ U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)
+ type: dict
+ policy:
+ description:
+      - Policy to apply to the KMS key.
+ - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
+ type: json
+ key_spec:
+ aliases:
+ - customer_master_key_spec
+ description:
+ - Specifies the type of KMS key to create.
+ - The specification is not changeable once the key is created.
+ type: str
+ default: SYMMETRIC_DEFAULT
+ choices: ['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1']
+ version_added: 2.1.0
+ version_added_collection: community.aws
+ key_usage:
+ description:
+ - Determines the cryptographic operations for which you can use the KMS key.
+ - The usage is not changeable once the key is created.
+ type: str
+ default: ENCRYPT_DECRYPT
+ choices: ['ENCRYPT_DECRYPT', 'SIGN_VERIFY']
+ version_added: 2.1.0
+ version_added_collection: community.aws
+author:
+ - Ted Timmons (@tedder)
+ - Will Thames (@willthames)
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+notes:
+ - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS.
+ This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata
+ shortly after modifying keys.
+ For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata.
+'''
+
+EXAMPLES = r'''
+# Create a new KMS key
+- amazon.aws.kms_key:
+ alias: mykey
+ tags:
+ Name: myKey
+ Purpose: protect_stuff
+
+# Update previous key with more tags
+- amazon.aws.kms_key:
+ alias: mykey
+ tags:
+ Name: myKey
+ Purpose: protect_stuff
+ Owner: security_team
+
+# Update a known key with grants allowing an instance with the billing-prod IAM profile
+# to decrypt data encrypted with the environment: production, application: billing
+# encryption context
+- amazon.aws.kms_key:
+ key_id: abcd1234-abcd-1234-5678-ef1234567890
+ grants:
+ - name: billing_prod
+ grantee_principal: arn:aws:iam::123456789012:role/billing_prod
+ constraints:
+ encryption_context_equals:
+ environment: production
+ application: billing
+ operations:
+ - Decrypt
+ - RetireGrant
+
+- name: Update IAM policy on an existing KMS key
+ amazon.aws.kms_key:
+ alias: my-kms-key
+ policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { <SOME STATEMENT> } ]}'
+ state: present
+
+- name: Example using lookup for policy json
+ amazon.aws.kms_key:
+ alias: my-kms-key
+ policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}"
+ state: present
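+
+# state=absent only schedules the key for deletion; pending_window controls
+# how many days KMS waits before actually deleting it.
+- name: Schedule a key for deletion with a 7 day pending window
+  amazon.aws.kms_key:
+    alias: my-kms-key
+    state: absent
+    pending_window: 7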
+'''
+
+RETURN = r'''
+key_id:
+ description: ID of key.
+ type: str
+ returned: always
+ sample: abcd1234-abcd-1234-5678-ef1234567890
+key_arn:
+ description: ARN of key.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+key_state:
+ description:
+ - The state of the key.
+ - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'),
+ C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating').
+ type: str
+ returned: always
+ sample: PendingDeletion
+key_usage:
+ description: The cryptographic operations for which you can use the key.
+ type: str
+ returned: always
+ sample: ENCRYPT_DECRYPT
+origin:
+ description: The source of the key's key material. When this value is C(AWS_KMS),
+ AWS KMS created the key material. When this value is C(EXTERNAL), the
+ key material was imported or the CMK lacks key material.
+ type: str
+ returned: always
+ sample: AWS_KMS
+aws_account_id:
+ description: The AWS Account ID that the key belongs to.
+ type: str
+ returned: always
+  sample: 123456789012
+creation_date:
+ description: Date and time of creation of the key.
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08.551000+10:00"
+deletion_date:
+ description: Date and time after which KMS deletes this KMS key.
+ type: str
+ returned: when key_state is PendingDeletion
+ sample: "2017-04-18T15:12:08.551000+10:00"
+ version_added: 3.3.0
+ version_added_collection: community.aws
+description:
+ description: Description of the key.
+ type: str
+ returned: always
+ sample: "My Key for Protecting important stuff"
+enabled:
+ description: Whether the key is enabled. True if I(key_state) is C(Enabled).
+ type: bool
+ returned: always
+ sample: false
+enable_key_rotation:
+ description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined.
+ type: bool
+ returned: always
+ sample: false
+aliases:
+ description: List of aliases associated with the key.
+ type: list
+ returned: always
+ sample:
+ - aws/acm
+ - aws/ebs
+policies:
+ description: List of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ elements: str
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "123456789012"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::123456789012:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+key_policies:
+ description: List of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ elements: dict
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "123456789012"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::123456789012:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+ version_added: 3.3.0
+ version_added_collection: community.aws
+tags:
+ description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags.
+ type: dict
+ returned: always
+ sample:
+ Name: myKey
+ Purpose: protecting_stuff
+grants:
+ description: List of grants associated with a key.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ constraints:
+ description: Constraints on the encryption context that the grant allows.
+ See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
+ type: dict
+ returned: always
+ sample:
+ encryption_context_equals:
+ "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:123456789012:function:xyz"
+ creation_date:
+ description: Date of creation of the grant.
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08+10:00"
+ grant_id:
+ description: The unique ID for the grant.
+ type: str
+ returned: always
+ sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
+ grantee_principal:
+ description: The principal that receives the grant's permissions.
+ type: str
+ returned: always
+ sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz
+ issuing_account:
+ description: The AWS account under which the grant was issued.
+ type: str
+ returned: always
+ sample: arn:aws:iam::123456789012:root
+ key_id:
+ description: The key ARN to which the grant applies.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ name:
+ description: The friendly name that identifies the grant.
+ type: str
+ returned: always
+ sample: xyz
+ operations:
+ description: The list of operations permitted by the grant.
+ type: list
+ returned: always
+ sample:
+ - Decrypt
+ - RetireGrant
+ retiring_principal:
+ description: The principal that can retire the grant.
+ type: str
+ returned: always
+ sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz
+changes_needed:
+ description: Grant types that would be changed/were changed.
+ type: dict
+ returned: always
+ sample: { "role": "add", "role grant": "add" }
+had_invalid_entries:
+  description: Whether there are invalid (non-ARN) entries in the KMS key policy. These do not count as a change, but will be removed if any changes are being made.
+ type: bool
+ returned: always
+'''
+
+# these mappings are used to go from simple labels to the actual 'Sid' values returned
+# by get_policy. They seem to be magic values.
+statement_label = {
+ 'role': 'Allow use of the key',
+ 'role grant': 'Allow attachment of persistent resources',
+ 'admin': 'Allow access for Key Administrators'
+}
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_iam_roles_with_backoff(connection):
+ paginator = connection.get_paginator('list_roles')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_keys_with_backoff(connection):
+ paginator = connection.get_paginator('list_keys')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_aliases_with_backoff(connection):
+ paginator = connection.get_paginator('list_aliases')
+ return paginator.paginate().build_full_result()
+
+
+def get_kms_aliases_lookup(connection):
+ _aliases = dict()
+ for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
+ # Not all aliases are actually associated with a key
+ if 'TargetKeyId' in alias:
+ # strip off leading 'alias/' and add it to key's aliases
+ if alias['TargetKeyId'] in _aliases:
+ _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
+ else:
+ _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
+ return _aliases
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_tags_with_backoff(connection, key_id, **kwargs):
+ return connection.list_resource_tags(KeyId=key_id, **kwargs)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_grants_with_backoff(connection, key_id):
+ params = dict(KeyId=key_id)
+ paginator = connection.get_paginator('list_grants')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_metadata_with_backoff(connection, key_id):
+ return connection.describe_key(KeyId=key_id)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def list_key_policies_with_backoff(connection, key_id):
+ paginator = connection.get_paginator('list_key_policies')
+ return paginator.paginate(KeyId=key_id).build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_key_policy_with_backoff(connection, key_id, policy_name):
+ return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
+
+
+def get_kms_tags(connection, module, key_id):
+ # Handle pagination here as list_resource_tags does not have
+ # a paginator
+ kwargs = {}
+ tags = []
+ more = True
+ while more:
+ try:
+ tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
+ tags.extend(tag_response['Tags'])
+ except is_boto3_error_code('AccessDeniedException'):
+ tag_response = {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key tags")
+ if tag_response.get('NextMarker'):
+ kwargs['Marker'] = tag_response['NextMarker']
+ else:
+ more = False
+ return tags
+
+
+def get_kms_policies(connection, module, key_id):
+ try:
+ policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
+ return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
+ policy in policies]
+ except is_boto3_error_code('AccessDeniedException'):
+ return []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key policies")
+
+
+def camel_to_snake_grant(grant):
+ ''' camel_to_snake_grant snakifies everything except the encryption context '''
+ constraints = grant.get('Constraints', {})
+ result = camel_dict_to_snake_dict(grant)
+ if 'EncryptionContextEquals' in constraints:
+ result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals']
+ if 'EncryptionContextSubset' in constraints:
+ result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset']
+ return result
+
+
+def get_key_details(connection, module, key_id):
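+    '''
+    Return a snake_cased description of the key: metadata plus aliases,
+    rotation status, grants, tags and policies, matching the module's
+    documented return values.
+    '''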
+ try:
+ result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain key metadata")
+ result['KeyArn'] = result.pop('Arn')
+
+ try:
+ aliases = get_kms_aliases_lookup(connection)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain aliases")
+
+ try:
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+ result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled')
+ except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e:
+ result['enable_key_rotation'] = None
+ result['aliases'] = aliases.get(result['KeyId'], [])
+
+ result = camel_dict_to_snake_dict(result)
+
+ # grants and tags get snakified differently
+ try:
+ result['grants'] = [camel_to_snake_grant(grant) for grant in
+ get_kms_grants_with_backoff(connection, key_id)['Grants']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain key grants")
+ tags = get_kms_tags(connection, module, key_id)
+ result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
+ result['policies'] = get_kms_policies(connection, module, key_id)
+ result['key_policies'] = [json.loads(policy) for policy in result['policies']]
+ return result
+
+
+def get_kms_facts(connection, module):
+ try:
+ keys = get_kms_keys_with_backoff(connection)['Keys']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain keys")
+
+ return [get_key_details(connection, module, key['KeyId']) for key in keys]
+
+
+def convert_grant_params(grant, key):
+ grant_params = dict(KeyId=key['key_arn'],
+ GranteePrincipal=grant['grantee_principal'])
+ if grant.get('operations'):
+ grant_params['Operations'] = grant['operations']
+ if grant.get('retiring_principal'):
+ grant_params['RetiringPrincipal'] = grant['retiring_principal']
+ if grant.get('name'):
+ grant_params['Name'] = grant['name']
+ if grant.get('constraints'):
+ grant_params['Constraints'] = dict()
+ if grant['constraints'].get('encryption_context_subset'):
+ grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset']
+ if grant['constraints'].get('encryption_context_equals'):
+ grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals']
+ return grant_params
+
+
+def different_grant(existing_grant, desired_grant):
+ if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'):
+ return True
+ if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'):
+ return True
+    if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations') or []):
+ return True
+ if existing_grant.get('constraints') != desired_grant.get('constraints'):
+ return True
+ return False
+
+
+def compare_grants(existing_grants, desired_grants, purge_grants=False):
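+    '''
+    Diff grants by name, returning (to_add, to_remove). A grant whose name
+    exists on both sides but whose details differ is placed in both lists so
+    that it is retired and re-created.
+    '''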
+ existing_dict = dict((eg['name'], eg) for eg in existing_grants)
+ desired_dict = dict((dg['name'], dg) for dg in desired_grants)
+ to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys())
+ if purge_grants:
+ to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys())
+ else:
+ to_remove_keys = set()
+ to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys())
+ for candidate in to_change_candidates:
+ if different_grant(existing_dict[candidate], desired_dict[candidate]):
+ to_add_keys.add(candidate)
+ to_remove_keys.add(candidate)
+
+    to_add = [desired_dict[key] for key in to_add_keys]
+    to_remove = [existing_dict[key] for key in to_remove_keys]
+    return to_add, to_remove
+
+
+def start_key_deletion(connection, module, key_metadata):
+ if key_metadata['KeyState'] == 'PendingDeletion':
+ return False
+
+ if module.check_mode:
+ return True
+
+ deletion_params = {'KeyId': key_metadata['Arn']}
+ if module.params.get('pending_window'):
+ deletion_params['PendingWindowInDays'] = module.params.get('pending_window')
+
+ try:
+ connection.schedule_key_deletion(**deletion_params)
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to schedule key for deletion")
+
+
+def cancel_key_deletion(connection, module, key):
+ key_id = key['key_arn']
+ if key['key_state'] != 'PendingDeletion':
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.cancel_key_deletion(KeyId=key_id)
+ # key is disabled after deletion cancellation
+ # set this so that ensure_enabled_disabled works correctly
+ key['key_state'] = 'Disabled'
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to cancel key deletion")
+
+ return True
+
+
+def ensure_enabled_disabled(connection, module, key, enabled):
+ desired_state = 'Enabled'
+ if not enabled:
+ desired_state = 'Disabled'
+
+ if key['key_state'] == desired_state:
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ if enabled:
+ try:
+ connection.enable_key(KeyId=key_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to enable key")
+ else:
+ try:
+ connection.disable_key(KeyId=key_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to disable key")
+
+ return True
+
+
+def update_alias(connection, module, key, alias):
+ alias = canonicalize_alias_name(alias)
+
+ if alias is None:
+ return False
+
+ key_id = key['key_arn']
+ aliases = get_kms_aliases_with_backoff(connection)['Aliases']
+ # We will only add new aliases, not rename existing ones
+ if alias in [_alias['AliasName'] for _alias in aliases]:
+ return False
+
+ if not module.check_mode:
+ try:
+ connection.create_alias(TargetKeyId=key_id, AliasName=alias)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to create key alias")
+
+ return True
+
+
+def update_description(connection, module, key, description):
+ if description is None:
+ return False
+ if key['description'] == description:
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ try:
+ connection.update_key_description(KeyId=key_id, Description=description)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update key description")
+
+ return True
+
+
+def update_tags(connection, module, key, desired_tags, purge_tags):
+ if desired_tags is None:
+ return False
+
+ to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags)
+ if not (bool(to_add) or bool(to_remove)):
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ if to_remove:
+ try:
+ connection.untag_resource(KeyId=key_id, TagKeys=to_remove)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to remove tag")
+ if to_add:
+ try:
+ tags = ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue')
+ connection.tag_resource(KeyId=key_id, Tags=tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to add tag to key")
+
+ return True
+
+
+def update_policy(connection, module, key, policy):
+ if policy is None:
+ return False
+ try:
+ new_policy = json.loads(policy)
+ except ValueError as e:
+ module.fail_json_aws(e, msg="Unable to parse new policy as JSON")
+
+ key_id = key['key_arn']
+ try:
+ keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default')
+ original_policy = json.loads(keyret['Policy'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ # If we can't fetch the current policy assume we're making a change
+ # Could occur if we have PutKeyPolicy without GetKeyPolicy
+ original_policy = {}
+
+ if not compare_policies(original_policy, new_policy):
+ return False
+
+ if not module.check_mode:
+ try:
+ connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update key policy")
+
+ return True
+
+
+def update_key_rotation(connection, module, key, enable_key_rotation):
+ if enable_key_rotation is None:
+ return False
+ key_id = key['key_arn']
+
+ try:
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+ if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation:
+ return False
+ except is_boto3_error_code('AccessDeniedException'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to get current key rotation status")
+
+ if not module.check_mode:
+ try:
+ if enable_key_rotation:
+ connection.enable_key_rotation(KeyId=key_id)
+ else:
+ connection.disable_key_rotation(KeyId=key_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to enable/disable key rotation")
+
+ return True
+
+
+def update_grants(connection, module, key, desired_grants, purge_grants):
+ existing_grants = key['grants']
+
+ to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants)
+ if not (bool(to_add) or bool(to_remove)):
+ return False
+
+ key_id = key['key_arn']
+ if not module.check_mode:
+ for grant in to_remove:
+ try:
+ connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to retire grant")
+ for grant in to_add:
+ grant_params = convert_grant_params(grant, key)
+ try:
+ connection.create_grant(**grant_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create grant")
+
+ return True
+
+
+def update_key(connection, module, key):
+ changed = False
+
+ changed |= cancel_key_deletion(connection, module, key)
+ changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled'])
+ changed |= update_alias(connection, module, key, module.params['alias'])
+ changed |= update_description(connection, module, key, module.params['description'])
+ changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags'))
+ changed |= update_policy(connection, module, key, module.params.get('policy'))
+ changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants'))
+ changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
+
+ # make results consistent with kms_facts before returning
+ result = get_key_details(connection, module, key['key_arn'])
+ result['changed'] = changed
+ return result
+
+
+def create_key(connection, module):
+ key_usage = module.params.get('key_usage')
+ key_spec = module.params.get('key_spec')
+ tags_list = ansible_dict_to_boto3_tag_list(
+ module.params['tags'] or {},
+ # KMS doesn't use "Key" and "Value" as other APIs do.
+ tag_name_key_name='TagKey', tag_value_key_name='TagValue'
+ )
+ params = dict(BypassPolicyLockoutSafetyCheck=False,
+ Tags=tags_list,
+ KeyUsage=key_usage,
+ CustomerMasterKeySpec=key_spec,
+ Origin='AWS_KMS')
+
+ if module.check_mode:
+ return {'changed': True}
+
+ if module.params.get('description'):
+ params['Description'] = module.params['description']
+ if module.params.get('policy'):
+ params['Policy'] = module.params['policy']
+
+ try:
+ result = connection.create_key(**params)['KeyMetadata']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to create initial key")
+
+ key = get_key_details(connection, module, result['KeyId'])
+ update_alias(connection, module, key, module.params['alias'])
+ update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
+
+ ensure_enabled_disabled(connection, module, key, module.params.get('enabled'))
+ update_grants(connection, module, key, module.params.get('grants'), False)
+
+ # make results consistent with kms_facts
+ result = get_key_details(connection, module, key['key_id'])
+ result['changed'] = True
+ return result
+
+
+def delete_key(connection, module, key_metadata):
+ changed = False
+
+ changed |= start_key_deletion(connection, module, key_metadata)
+
+ result = get_key_details(connection, module, key_metadata['Arn'])
+ result['changed'] = changed
+ return result
+
+
+def get_arn_from_role_name(iam, rolename):
+ ret = iam.get_role(RoleName=rolename)
+ if ret.get('Role') and ret['Role'].get('Arn'):
+ return ret['Role']['Arn']
+ raise Exception('could not find arn for name {0}.'.format(rolename))
+
+
+def canonicalize_alias_name(alias):
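+    # Ensure the 'alias/' prefix: 'mykey' becomes 'alias/mykey', while an
+    # already-prefixed name is returned unchanged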
+ if alias is None:
+ return None
+ if alias.startswith('alias/'):
+ return alias
+ return 'alias/' + alias
+
+
+def fetch_key_metadata(connection, module, key_id, alias):
+    # Note - fetching a key's metadata is very inconsistent shortly after any sort of update to a key has occurred.
+    # Combinations of manual waiters, comparing expected key values against actual values, and static sleeps
+    # have all been tried, but none of them reliably solved the problem.
+    # Integration tests wait for 10 seconds to combat this issue.
+    # See https://github.com/ansible-collections/community.aws/pull/1052.
+
+    alias = canonicalize_alias_name(alias)
+
+ try:
+ # Fetch by key_id where possible
+ if key_id:
+ return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ # Or try alias as a backup
+ return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata']
+
+ except connection.exceptions.NotFoundException:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, 'Failed to fetch key metadata.')
+
+
+def main():
+ argument_spec = dict(
+ alias=dict(aliases=['key_alias']),
+ pending_window=dict(aliases=['deletion_delay'], type='int'),
+ key_id=dict(aliases=['key_arn']),
+ description=dict(),
+ enabled=dict(type='bool', default=True),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ grants=dict(type='list', default=[], elements='dict'),
+ policy=dict(type='json'),
+ purge_grants=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ enable_key_rotation=(dict(type='bool')),
+ key_spec=dict(type='str', default='SYMMETRIC_DEFAULT', aliases=['customer_master_key_spec'],
+ choices=['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1']),
+ key_usage=dict(type='str', default='ENCRYPT_DECRYPT', choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY']),
+ )
+
+ module = AnsibleAWSModule(
+ supports_check_mode=True,
+ argument_spec=argument_spec,
+ required_one_of=[['alias', 'key_id']],
+ )
+
+ kms = module.client('kms')
+
+ module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.",
+ date='2024-05-01', collection_name='amazon.aws')
+
+ key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias'))
+ # We can't create keys with a specific ID, if we can't access the key we'll have to fail
+ if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata:
+ module.fail_json(msg="Could not find key with id {0} to update".format(module.params.get('key_id')))
+
+ if module.params.get('state') == 'absent':
+ if key_metadata is None:
+ module.exit_json(changed=False)
+ result = delete_key(kms, module, key_metadata)
+ module.exit_json(**result)
+
+ if key_metadata:
+ key_details = get_key_details(kms, module, key_metadata['Arn'])
+ result = update_key(kms, module, key_details)
+ module.exit_json(**result)
+
+ result = create_key(kms, module)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
new file mode 100644
index 00000000..ba8f30a2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: kms_key_info
+version_added: 5.0.0
+short_description: Gather information about AWS KMS keys
+description:
+ - Gather information about AWS KMS keys including tags and grants.
+ - Prior to release 5.0.0 this module was called C(community.aws.aws_kms_info).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - "Will Thames (@willthames)"
+options:
+ alias:
+ description:
+ - Alias for key.
+ - Mutually exclusive with I(key_id) and I(filters).
+ required: false
+ aliases:
+ - key_alias
+ type: str
+ version_added: 1.4.0
+ version_added_collection: community.aws
+ key_id:
+ description:
+ - Key ID or ARN of the key.
+ - Mutually exclusive with I(alias) and I(filters).
+ required: false
+ aliases:
+ - key_arn
+ type: str
+ version_added: 1.4.0
+ version_added_collection: community.aws
+ filters:
+ description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        The filters aren't natively supported by boto3, but are provided here for consistency
+        with other modules. Standard tag filters (C(tag-key), C(tag-value) and
+        C(tag:tagName)) are available, as are C(key-id) and C(alias).
+      - Mutually exclusive with I(alias) and I(key_id).
+ type: dict
+ pending_deletion:
+ description: Whether to get full details (tags, grants etc.) of keys pending deletion.
+ default: False
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all KMS keys
+- amazon.aws.kms_key_info:
+
+# Gather information about all keys with a Name tag
+- amazon.aws.kms_key_info:
+ filters:
+ tag-key: Name
+
+# Gather information about all keys with a specific name
+- amazon.aws.kms_key_info:
+ filters:
+ "tag:Name": Example
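+
+# Gather information about a specific key, looked up by its alias
+- amazon.aws.kms_key_info:
+    alias: my-kms-key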
+'''
+
+RETURN = r'''
+kms_keys:
+ description: List of keys.
+ type: complex
+ returned: always
+ contains:
+ key_id:
+ description: ID of key.
+ type: str
+ returned: always
+ sample: abcd1234-abcd-1234-5678-ef1234567890
+ key_arn:
+ description: ARN of key.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ key_state:
+ description:
+ - The state of the key.
+ - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'),
+ C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating').
+ type: str
+ returned: always
+ sample: PendingDeletion
+ key_usage:
+ description: The cryptographic operations for which you can use the key.
+ type: str
+ returned: always
+ sample: ENCRYPT_DECRYPT
+ origin:
+ description: The source of the key's key material. When this value is C(AWS_KMS),
+ AWS KMS created the key material. When this value is C(EXTERNAL), the
+ key material was imported or the CMK lacks key material.
+ type: str
+ returned: always
+ sample: AWS_KMS
+ aws_account_id:
+ description: The AWS Account ID that the key belongs to.
+ type: str
+ returned: always
+ sample: 123456789012
+ creation_date:
+ description: Date and time of creation of the key.
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08.551000+10:00"
+ deletion_date:
+ description: Date and time after which KMS deletes this KMS key.
+ type: str
+ returned: when key_state is PendingDeletion
+ sample: "2017-04-18T15:12:08.551000+10:00"
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ description:
+ description: Description of the key.
+ type: str
+ returned: always
+ sample: "My Key for Protecting important stuff"
+ enabled:
+ description: Whether the key is enabled. True if I(key_state) is C(Enabled).
+ type: bool
+ returned: always
+ sample: false
+ enable_key_rotation:
+ description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined.
+ type: bool
+ returned: always
+ sample: false
+ aliases:
+      description: List of aliases associated with the key.
+ type: list
+ returned: always
+ sample:
+ - aws/acm
+ - aws/ebs
+ tags:
+ description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags.
+ type: dict
+ returned: always
+ sample:
+ Name: myKey
+ Purpose: protecting_stuff
+ policies:
+ description: List of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ elements: str
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "123456789012"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::123456789012:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+ key_policies:
+ description: List of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list
+ returned: always
+ elements: dict
+ sample:
+ Version: "2012-10-17"
+ Id: "auto-ebs-2"
+ Statement:
+ - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
+ Effect: "Allow"
+ Principal:
+ AWS: "*"
+ Action:
+ - "kms:Encrypt"
+ - "kms:Decrypt"
+ - "kms:ReEncrypt*"
+ - "kms:GenerateDataKey*"
+ - "kms:CreateGrant"
+ - "kms:DescribeKey"
+ Resource: "*"
+ Condition:
+ StringEquals:
+ kms:CallerAccount: "123456789012"
+ kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
+ - Sid: "Allow direct access to key metadata to the account"
+ Effect: "Allow"
+ Principal:
+ AWS: "arn:aws:iam::123456789012:root"
+ Action:
+ - "kms:Describe*"
+ - "kms:Get*"
+ - "kms:List*"
+ - "kms:RevokeGrant"
+ Resource: "*"
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ grants:
+ description: List of grants associated with a key.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ constraints:
+ description: Constraints on the encryption context that the grant allows.
+            See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details.
+ type: dict
+ returned: always
+ sample:
+ encryption_context_equals:
+ "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:123456789012:function:xyz"
+ creation_date:
+ description: Date of creation of the grant.
+ type: str
+ returned: always
+ sample: "2017-04-18T15:12:08+10:00"
+ grant_id:
+ description: The unique ID for the grant.
+ type: str
+ returned: always
+ sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
+ grantee_principal:
+ description: The principal that receives the grant's permissions.
+ type: str
+ returned: always
+ sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz
+ issuing_account:
+ description: The AWS account under which the grant was issued.
+ type: str
+ returned: always
+ sample: arn:aws:iam::123456789012:root
+ key_id:
+ description: The key ARN to which the grant applies.
+ type: str
+ returned: always
+ sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
+ name:
+ description: The friendly name that identifies the grant.
+ type: str
+ returned: always
+ sample: xyz
+ operations:
+ description: The list of operations permitted by the grant.
+ type: list
+ returned: always
+ sample:
+ - Decrypt
+ - RetireGrant
+ retiring_principal:
+ description: The principal that can retire the grant.
+ type: str
+ returned: always
+ sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+# Caching lookup for aliases
+_aliases = dict()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_keys_with_backoff(connection):
+ paginator = connection.get_paginator('list_keys')
+ return paginator.paginate().build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_aliases_with_backoff(connection):
+ paginator = connection.get_paginator('list_aliases')
+ return paginator.paginate().build_full_result()
+
+
+def get_kms_aliases_lookup(connection):
+ if not _aliases:
+ for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
+ # Not all aliases are actually associated with a key
+ if 'TargetKeyId' in alias:
+ # strip off leading 'alias/' and add it to key's aliases
+ if alias['TargetKeyId'] in _aliases:
+ _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
+ else:
+ _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
+ return _aliases
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_tags_with_backoff(connection, key_id, **kwargs):
+ return connection.list_resource_tags(KeyId=key_id, **kwargs)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_grants_with_backoff(connection, key_id, **kwargs):
+ params = dict(KeyId=key_id)
+ if kwargs.get('tokens'):
+ params['GrantTokens'] = kwargs['tokens']
+ paginator = connection.get_paginator('list_grants')
+ return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_kms_metadata_with_backoff(connection, key_id):
+ return connection.describe_key(KeyId=key_id)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def list_key_policies_with_backoff(connection, key_id):
+ paginator = connection.get_paginator('list_key_policies')
+ return paginator.paginate(KeyId=key_id).build_full_result()
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_key_policy_with_backoff(connection, key_id, policy_name):
+ return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
+
+
+@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
+def get_enable_key_rotation_with_backoff(connection, key_id):
+ try:
+ current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
+    except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']):
+ return None
+
+ return current_rotation_status.get('KeyRotationEnabled')
+
+
+def canonicalize_alias_name(alias):
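+    # e.g. 'my-key' becomes 'alias/my-key'; an already-prefixed name is returned unchanged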
+ if alias is None:
+ return None
+ if alias.startswith('alias/'):
+ return alias
+ return 'alias/' + alias
+
+
+def get_kms_tags(connection, module, key_id):
+ # Handle pagination here as list_resource_tags does not have
+ # a paginator
+ kwargs = {}
+ tags = []
+ more = True
+ while more:
+ try:
+ tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
+ tags.extend(tag_response['Tags'])
+ except is_boto3_error_code('AccessDeniedException'):
+ tag_response = {}
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key tags")
+ if tag_response.get('NextMarker'):
+ kwargs['Marker'] = tag_response['NextMarker']
+ else:
+ more = False
+ return tags
+
+
+def get_kms_policies(connection, module, key_id):
+ try:
+ policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
+ return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
+ policy in policies]
+ except is_boto3_error_code('AccessDeniedException'):
+ return []
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key policies")
+
+
+def key_matches_filter(key, filtr):
+ if filtr[0] == 'key-id':
+ return filtr[1] == key['key_id']
+ if filtr[0] == 'tag-key':
+ return filtr[1] in key['tags']
+ if filtr[0] == 'tag-value':
+ return filtr[1] in key['tags'].values()
+ if filtr[0] == 'alias':
+ return filtr[1] in key['aliases']
+ if filtr[0].startswith('tag:'):
+ tag_key = filtr[0][4:]
+ if tag_key not in key['tags']:
+ return False
+ return key['tags'].get(tag_key) == filtr[1]
+
+
+def key_matches_filters(key, filters):
+ if not filters:
+ return True
+ else:
+ return all(key_matches_filter(key, filtr) for filtr in filters.items())
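+
+# For illustration (hypothetical values): a key dict such as
+#   {'key_id': 'abcd1234', 'tags': {'Name': 'Example'}, 'aliases': ['my-key']}
+# satisfies key_matches_filters with filters like {'tag:Name': 'Example'},
+# {'tag-key': 'Name'} or {'alias': 'my-key'}; every supplied filter must match.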
+
+
+def get_key_details(connection, module, key_id, tokens=None):
+ if not tokens:
+ tokens = []
+ try:
+ result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
+ # Make sure we have the canonical ARN, we might have been passed an alias
+ key_id = result['Arn']
+ except is_boto3_error_code('NotFoundException'):
+ return None
+ except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except
+ module.warn('Permission denied fetching key metadata ({0})'.format(key_id))
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key metadata")
+ result['KeyArn'] = result.pop('Arn')
+
+ try:
+ aliases = get_kms_aliases_lookup(connection)
+ except is_boto3_error_code('AccessDeniedException'):
+ module.warn('Permission denied fetching key aliases')
+ aliases = {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain aliases")
+ # We can only get aliases for our own account, so we don't need the full ARN
+ result['aliases'] = aliases.get(result['KeyId'], [])
+ result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
+
+ if module.params.get('pending_deletion'):
+ return camel_dict_to_snake_dict(result)
+
+ try:
+ result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
+ except is_boto3_error_code('AccessDeniedException'):
+ module.warn('Permission denied fetching key grants ({0})'.format(key_id))
+ result['grants'] = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to obtain key grants")
+
+ tags = get_kms_tags(connection, module, key_id)
+
+ result = camel_dict_to_snake_dict(result)
+ result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
+ result['policies'] = get_kms_policies(connection, module, key_id)
+ result['key_policies'] = [json.loads(policy) for policy in result['policies']]
+ return result
+
+
+def get_kms_info(connection, module):
+ if module.params.get('key_id'):
+ key_id = module.params.get('key_id')
+ details = get_key_details(connection, module, key_id)
+ if details:
+ return [details]
+ return []
+ elif module.params.get('alias'):
+ alias = canonicalize_alias_name(module.params.get('alias'))
+ details = get_key_details(connection, module, alias)
+ if details:
+ return [details]
+ return []
+ else:
+ try:
+ keys = get_kms_keys_with_backoff(connection)['Keys']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to obtain keys")
+ return [get_key_details(connection, module, key['KeyId']) for key in keys]
+
+
+def main():
+ argument_spec = dict(
+ alias=dict(aliases=['key_alias']),
+ key_id=dict(aliases=['key_arn']),
+ filters=dict(type='dict'),
+ pending_deletion=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ mutually_exclusive=[['alias', 'filters', 'key_id']],
+ supports_check_mode=True)
+
+ try:
+ connection = module.client('kms')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.",
+ date='2024-05-01', collection_name='amazon.aws')
+
+ all_keys = get_kms_info(connection, module)
+ filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])]
+ ret_params = dict(kms_keys=filtered_keys)
+
+ module.exit_json(**ret_params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda.py b/ansible_collections/amazon/aws/plugins/modules/lambda.py
new file mode 100644
index 00000000..da947f69
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda.py
@@ -0,0 +1,803 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lambda
+version_added: 5.0.0
+short_description: Manage AWS Lambda functions
+description:
+ - Allows for the management of Lambda functions.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ name:
+ description:
+ - The name you want to assign to the function you are uploading. Cannot be changed.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete Lambda function.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ runtime:
+ description:
+ - The runtime environment for the Lambda function you are uploading.
+ - Required when creating a function. Uses parameters as described in boto3 docs.
+ - Required when I(state=present).
+ - For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
+ type: str
+ role:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
+        resources. You may use the bare role name if the role belongs to the same AWS account.
+ - Required when I(state=present).
+ type: str
+ handler:
+ description:
+ - The function within your code that Lambda calls to begin execution.
+ type: str
+ zip_file:
+ description:
+        - A .zip file containing your deployment package.
+ - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
+ aliases: [ 'src' ]
+ type: str
+ s3_bucket:
+ description:
+ - Amazon S3 bucket name where the .zip file containing your deployment package is stored.
+ - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
+ - I(s3_bucket) and I(s3_key) are required together.
+ type: str
+ s3_key:
+ description:
+ - The Amazon S3 object (the deployment package) key name you want to upload.
+ - I(s3_bucket) and I(s3_key) are required together.
+ type: str
+ s3_object_version:
+ description:
+ - The Amazon S3 object (the deployment package) version you want to upload.
+ type: str
+ description:
+ description:
+ - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
+ type: str
+ timeout:
+ description:
+ - The function maximum execution time in seconds after which Lambda should terminate the function.
+ default: 3
+ type: int
+ memory_size:
+ description:
+ - The amount of memory, in MB, your Lambda function is given.
+ default: 128
+ type: int
+ vpc_subnet_ids:
+ description:
+ - List of subnet IDs to run Lambda function in.
+ - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
+ - If set, I(vpc_security_group_ids) must also be set.
+ type: list
+ elements: str
+ vpc_security_group_ids:
+ description:
+ - List of VPC security group IDs to associate with the Lambda function.
+ - Required when I(vpc_subnet_ids) is used.
+ type: list
+ elements: str
+ environment_variables:
+ description:
+ - A dictionary of environment variables the Lambda function is given.
+ type: dict
+ dead_letter_arn:
+ description:
+ - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
+ type: str
+ tracing_mode:
+ description:
+ - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default.
+ choices: ['Active', 'PassThrough']
+ type: str
+ kms_key_arn:
+ description:
+ - The KMS key ARN used to encrypt the function's environment variables.
+ type: str
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ architecture:
+ description:
+ - The instruction set architecture that the function supports.
+ - Requires one of I(s3_bucket) or I(zip_file).
+ - Requires botocore >= 1.21.51.
+ type: str
+ choices: ['x86_64', 'arm64']
+ aliases: ['architectures']
+ version_added: 5.0.0
+author:
+ - 'Steyn Huizinga (@steynovich)'
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Create Lambda functions
+- name: looped creation
+ amazon.aws.lambda:
+ name: '{{ item.name }}'
+ state: present
+ zip_file: '{{ item.zip_file }}'
+ runtime: 'python2.7'
+ role: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ vpc_subnet_ids:
+ - subnet-123abcde
+ - subnet-edcba321
+ vpc_security_group_ids:
+ - sg-123abcde
+ - sg-edcba321
+ environment_variables: '{{ item.env_vars }}'
+ tags:
+ key1: 'value1'
+ loop:
+ - name: HelloWorld
+ zip_file: hello-code.zip
+ env_vars:
+ key1: "first"
+ key2: "second"
+ - name: ByeBye
+ zip_file: bye-code.zip
+ env_vars:
+ key1: "1"
+ key2: "2"
+
+# To remove previously added tags pass an empty dict
+- name: remove tags
+ amazon.aws.lambda:
+ name: 'Lambda function'
+ state: present
+ zip_file: 'code.zip'
+ runtime: 'python2.7'
+ role: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ tags: {}
+
+# Basic Lambda function deletion
+- name: Delete Lambda functions HelloWorld and ByeBye
+ amazon.aws.lambda:
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - HelloWorld
+ - ByeBye
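+
+# Create a Lambda function from a deployment package stored in S3
+# (the bucket, key, runtime and role below are illustrative placeholders)
+- name: S3-based creation
+  amazon.aws.lambda:
+    name: HelloWorldS3
+    state: present
+    s3_bucket: my-deployment-bucket
+    s3_key: 'lambda/hello-code.zip'
+    runtime: 'python3.9'
+    role: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+    handler: 'hello_python.my_handler'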
+'''
+
+RETURN = r'''
+code:
+ description: The lambda function's code returned by get_function in boto3.
+ returned: success
+ type: dict
+ contains:
+ location:
+ description:
+ - The presigned URL you can use to download the function's .zip file that you previously uploaded.
+ - The URL is valid for up to 10 minutes.
+ returned: success
+ type: str
+ sample: 'https://prod-04-2014-tasks.s3.us-east-1.amazonaws.com/snapshots/sample'
+ repository_type:
+ description: The repository from which you can download the function.
+ returned: success
+ type: str
+ sample: 'S3'
+configuration:
+  description: The lambda function's configuration metadata returned by get_function in boto3.
+ returned: success
+ type: dict
+ contains:
+ architectures:
+ description: The architectures supported by the function.
+ returned: successful run where botocore >= 1.21.51
+ type: list
+ elements: str
+ sample: ['arm64']
+ code_sha256:
+ description: The SHA256 hash of the function's deployment package.
+ returned: success
+ type: str
+ sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU='
+ code_size:
+ description: The size of the function's deployment package in bytes.
+ returned: success
+ type: int
+ sample: 123
+ dead_letter_config:
+ description: The function's dead letter queue.
+ returned: when the function has a dead letter queue configured
+ type: dict
+      sample: { 'target_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' }
+ contains:
+ target_arn:
+ description: The ARN of an SQS queue or SNS topic.
+ returned: when the function has a dead letter queue configured
+ type: str
+ sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1
+ description:
+ description: The function's description.
+ returned: success
+ type: str
+ sample: 'My function'
+ environment:
+ description: The function's environment variables.
+ returned: when environment variables exist
+ type: dict
+ contains:
+ variables:
+ description: Environment variable key-value pairs.
+ returned: when environment variables exist
+ type: dict
+ sample: {'key': 'value'}
+ error:
+ description: Error message for environment variables that could not be applied.
+ returned: when there is an error applying environment variables
+ type: dict
+ contains:
+ error_code:
+ description: The error code.
+ returned: when there is an error applying environment variables
+ type: str
+ message:
+ description: The error message.
+ returned: when there is an error applying environment variables
+ type: str
+ function_arn:
+ description: The function's Amazon Resource Name (ARN).
+ returned: on success
+ type: str
+ sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1'
+ function_name:
+ description: The function's name.
+ returned: on success
+ type: str
+ sample: 'myFunction'
+ handler:
+ description: The function Lambda calls to begin executing your function.
+ returned: on success
+ type: str
+ sample: 'index.handler'
+ last_modified:
+ description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD).
+ returned: on success
+ type: str
+ sample: '2017-08-01T00:00:00.000+0000'
+ memory_size:
+ description: The memory allocated to the function.
+ returned: on success
+ type: int
+ sample: 128
+ revision_id:
+ description: The latest updated revision of the function or alias.
+ returned: on success
+ type: str
+ sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c'
+ role:
+ description: The function's execution role.
+ returned: on success
+ type: str
+ sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+ runtime:
+      description: The runtime environment for the Lambda function.
+ returned: on success
+ type: str
+ sample: 'nodejs6.10'
+ tracing_config:
+ description: The function's AWS X-Ray tracing configuration.
+ returned: on success
+ type: dict
+ sample: { 'mode': 'Active' }
+ contains:
+ mode:
+ description: The tracing mode.
+ returned: on success
+ type: str
+ sample: 'Active'
+ timeout:
+ description: The amount of time that Lambda allows a function to run before terminating it.
+ returned: on success
+ type: int
+ sample: 3
+ version:
+ description: The version of the Lambda function.
+ returned: on success
+ type: str
+ sample: '1'
+ vpc_config:
+ description: The function's networking configuration.
+ returned: on success
+ type: dict
+ sample: {
+ 'security_group_ids': [],
+ 'subnet_ids': [],
+ 'vpc_id': '123'
+ }
+'''
+
+import base64
+import hashlib
+import traceback
+import re
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError, WaiterError
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+
+def get_account_info(module):
+ """return the account information (account id and partition) we are currently working on
+
+    get_account_info tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy, so we try several
+    different ways. Giving either IAM or STS privileges to the account
+    should be enough to permit this.
+ """
+ account_id = None
+ partition = None
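+    # First try STS get_caller_identity, which needs no extra permissions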
+ try:
+ sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+ caller_id = sts_client.get_caller_identity(aws_retry=True)
+ account_id = caller_id.get('Account')
+ partition = caller_id.get('Arn').split(':')[1]
+ except (BotoCoreError, ClientError):
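+        # STS failed; fall back to IAM get_user and take the account from the user ARN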
+ try:
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
+ except is_boto3_error_code('AccessDenied') as e:
+ try:
+ except_msg = to_native(e.message)
+ except AttributeError:
+ except_msg = to_native(e)
+ m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
+ if m is None:
+ module.fail_json_aws(e, msg="getting account information")
+ account_id = m.group(4)
+ partition = m.group(1)
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="getting account information")
+
+ return account_id, partition
+
+
+def get_current_function(connection, function_name, qualifier=None):
+ try:
+ if qualifier is not None:
+ return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True)
+ return connection.get_function(FunctionName=function_name, aws_retry=True)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return None
+
+
+def sha256sum(filename):
+    # Lambda reports CodeSha256 as the base64-encoded SHA-256 digest of the
+    # deployment package, so return the digest in the same format
+    hasher = hashlib.sha256()
+    with open(filename, 'rb') as f:
+        hasher.update(f.read())
+
+    code_hash = hasher.digest()
+    code_b64 = base64.b64encode(code_hash)
+    base64_digest = code_b64.decode('utf-8')
+
+    return base64_digest
+
+
+def set_tag(client, module, tags, function, purge_tags):
+
+ if tags is None:
+ return False
+
+ changed = False
+ arn = function['Configuration']['FunctionArn']
+
+ try:
+ current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list tags")
+
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
+
+ if not tags_to_remove and not tags_to_add:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ if tags_to_remove:
+ client.untag_resource(
+ Resource=arn,
+ TagKeys=tags_to_remove,
+ aws_retry=True
+ )
+ changed = True
+
+ if tags_to_add:
+ client.tag_resource(
+ Resource=arn,
+ Tags=tags_to_add,
+ aws_retry=True
+ )
+ changed = True
+
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn))
+
+ return changed
+
+
+def wait_for_lambda(client, module, name):
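+    # Lambda rejects updates while a function is still being created or while a
+    # previous update is in progress, so wait on both waiters before changing it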
+ try:
+ client_active_waiter = client.get_waiter('function_active')
+ client_updated_waiter = client.get_waiter('function_updated')
+ client_active_waiter.wait(FunctionName=name)
+ client_updated_waiter.wait(FunctionName=name)
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating')
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating')
+
+
+def format_response(response):
+ tags = response.get("Tags", {})
+ result = camel_dict_to_snake_dict(response)
+ # Lambda returns a dict rather than the usual boto3 list of dicts
+ result["tags"] = tags
+ return result
+
+
+def _zip_args(zip_file, current_config, ignore_checksum):
+ if not zip_file:
+ return {}
+
+    # When another code-related change is pending (ignore_checksum=True) we skip
+    # the checksum comparison and always re-upload the code
+ if not ignore_checksum:
+ local_checksum = sha256sum(zip_file)
+ remote_checksum = current_config.get('CodeSha256', '')
+ if local_checksum == remote_checksum:
+ return {}
+
+ with open(zip_file, 'rb') as f:
+ zip_content = f.read()
+ return {'ZipFile': zip_content}
+
+
+def _s3_args(s3_bucket, s3_key, s3_object_version):
+ if not s3_bucket:
+ return {}
+ if not s3_key:
+ return {}
+
+ code = {'S3Bucket': s3_bucket,
+ 'S3Key': s3_key}
+ if s3_object_version:
+ code.update({'S3ObjectVersion': s3_object_version})
+
+ return code
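+
+# For illustration (hypothetical values): _s3_args('my-bucket', 'code.zip', None)
+# returns {'S3Bucket': 'my-bucket', 'S3Key': 'code.zip'}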
+
+
+def _code_args(module, current_config):
+ s3_bucket = module.params.get('s3_bucket')
+ s3_key = module.params.get('s3_key')
+ s3_object_version = module.params.get('s3_object_version')
+ zip_file = module.params.get('zip_file')
+ architectures = module.params.get('architecture')
+ checksum_match = False
+
+ code_kwargs = {}
+
+ if architectures and current_config.get('Architectures', None) != [architectures]:
+        module.warn('Architecture change detected, code will be re-uploaded')
+ code_kwargs.update({'Architectures': [architectures]})
+
+ try:
+ code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs)))
+ except IOError as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+ code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version))
+
+ if not code_kwargs:
+ return {}
+
+ if not architectures and current_config.get('Architectures', None):
+ code_kwargs.update({'Architectures': current_config.get('Architectures', None)})
+
+ return code_kwargs
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ runtime=dict(),
+ role=dict(),
+ handler=dict(),
+ zip_file=dict(aliases=['src']),
+ s3_bucket=dict(),
+ s3_key=dict(no_log=False),
+ s3_object_version=dict(),
+ description=dict(default=''),
+ timeout=dict(type='int', default=3),
+ memory_size=dict(type='int', default=128),
+ vpc_subnet_ids=dict(type='list', elements='str'),
+ vpc_security_group_ids=dict(type='list', elements='str'),
+ environment_variables=dict(type='dict'),
+ dead_letter_arn=dict(),
+ kms_key_arn=dict(type='str', no_log=False),
+ tracing_mode=dict(choices=['Active', 'PassThrough']),
+ architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+
+ mutually_exclusive = [['zip_file', 's3_key'],
+ ['zip_file', 's3_bucket'],
+ ['zip_file', 's3_object_version']]
+
+ required_together = [['s3_key', 's3_bucket'],
+ ['vpc_subnet_ids', 'vpc_security_group_ids']]
+
+ required_if = [
+ ['state', 'present', ['runtime', 'handler', 'role']],
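+        # a trailing True marks the option list as "require any of" rather than "require all of"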
+ ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True],
+ ['architecture', 'arm64', ['zip_file', 's3_bucket'], True],
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_if=required_if)
+
+ name = module.params.get('name')
+ state = module.params.get('state').lower()
+ runtime = module.params.get('runtime')
+ role = module.params.get('role')
+ handler = module.params.get('handler')
+ s3_bucket = module.params.get('s3_bucket')
+ s3_key = module.params.get('s3_key')
+ s3_object_version = module.params.get('s3_object_version')
+ zip_file = module.params.get('zip_file')
+ description = module.params.get('description')
+ timeout = module.params.get('timeout')
+ memory_size = module.params.get('memory_size')
+ vpc_subnet_ids = module.params.get('vpc_subnet_ids')
+ vpc_security_group_ids = module.params.get('vpc_security_group_ids')
+ environment_variables = module.params.get('environment_variables')
+ dead_letter_arn = module.params.get('dead_letter_arn')
+ tracing_mode = module.params.get('tracing_mode')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+ kms_key_arn = module.params.get('kms_key_arn')
+ architectures = module.params.get('architecture')
+
+ check_mode = module.check_mode
+ changed = False
+
+ if architectures:
+ module.require_botocore_at_least(
+ '1.21.51', reason='to configure the architectures that the function supports.')
+
+ try:
+ client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Trying to connect to AWS")
+
+ if state == 'present':
+ if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
+ role_arn = role
+ else:
+ # get account ID and assemble ARN
+ account_id, partition = get_account_info(module)
+ role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)
+
+ # Get function configuration if present, False otherwise
+ current_function = get_current_function(client, name)
+
+ # Update existing Lambda function
+ if state == 'present' and current_function:
+
+ # Get current state
+ current_config = current_function['Configuration']
+ current_version = None
+
+ # Update function configuration
+ func_kwargs = {'FunctionName': name}
+
+ # Update configuration if needed
+ if role_arn and current_config['Role'] != role_arn:
+ func_kwargs.update({'Role': role_arn})
+ if handler and current_config['Handler'] != handler:
+ func_kwargs.update({'Handler': handler})
+ if description and current_config['Description'] != description:
+ func_kwargs.update({'Description': description})
+ if timeout and current_config['Timeout'] != timeout:
+ func_kwargs.update({'Timeout': timeout})
+ if memory_size and current_config['MemorySize'] != memory_size:
+ func_kwargs.update({'MemorySize': memory_size})
+ if runtime and current_config['Runtime'] != runtime:
+ func_kwargs.update({'Runtime': runtime})
+ if (environment_variables is not None) and (current_config.get(
+ 'Environment', {}).get('Variables', {}) != environment_variables):
+ func_kwargs.update({'Environment': {'Variables': environment_variables}})
+ if dead_letter_arn is not None:
+ if current_config.get('DeadLetterConfig'):
+ if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+ else:
+ if dead_letter_arn != "":
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+ if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode):
+ func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
+ if kms_key_arn:
+ func_kwargs.update({'KMSKeyArn': kms_key_arn})
+
+ # If VPC configuration is desired
+ if vpc_subnet_ids:
+
+ if 'VpcConfig' in current_config:
+ # Compare VPC config with current config
+ current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
+ current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
+
+ subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
+ vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
+
+ if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
+ new_vpc_config = {'SubnetIds': vpc_subnet_ids,
+ 'SecurityGroupIds': vpc_security_group_ids}
+ func_kwargs.update({'VpcConfig': new_vpc_config})
+ else:
+            # No VPC configuration is desired; ensure the VPC config is emptied when present in the current config
+ if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
+ func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
+
+ # Upload new configuration if configuration has changed
+ if len(func_kwargs) > 1:
+ if not check_mode:
+ wait_for_lambda(client, module, name)
+
+ try:
+ if not check_mode:
+ response = client.update_function_configuration(aws_retry=True, **func_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to update lambda configuration")
+
+ # Tag Function
+ if tags is not None:
+ if set_tag(client, module, tags, current_function, purge_tags):
+ changed = True
+
+ code_kwargs = _code_args(module, current_config)
+ if code_kwargs:
+
+ # Update code configuration
+ code_kwargs.update({'FunctionName': name, 'Publish': True})
+
+ if not check_mode:
+ wait_for_lambda(client, module, name)
+
+ try:
+ if not check_mode:
+ response = client.update_function_code(aws_retry=True, **code_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to upload new code")
+
+ # Describe function code and configuration
+ response = get_current_function(client, name, qualifier=current_version)
+ if not response:
+ module.fail_json(msg='Unable to get function information after updating')
+ response = format_response(response)
+ # We're done
+ module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response)
+
+    # Function doesn't exist, create new Lambda function
+ elif state == 'present':
+
+ func_kwargs = {'FunctionName': name,
+ 'Publish': True,
+ 'Runtime': runtime,
+ 'Role': role_arn,
+ 'Timeout': timeout,
+ 'MemorySize': memory_size,
+ }
+
+ code = _code_args(module, {})
+ if not code:
+ module.fail_json(msg='Either S3 object or path to zipfile required')
+ if 'Architectures' in code:
+ func_kwargs.update({'Architectures': code.pop('Architectures')})
+ func_kwargs.update({'Code': code})
+
+ if description is not None:
+ func_kwargs.update({'Description': description})
+
+ if handler is not None:
+ func_kwargs.update({'Handler': handler})
+
+ if environment_variables:
+ func_kwargs.update({'Environment': {'Variables': environment_variables}})
+
+ if dead_letter_arn:
+ func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
+
+ if tracing_mode:
+ func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
+
+ if kms_key_arn:
+ func_kwargs.update({'KMSKeyArn': kms_key_arn})
+
+ # If VPC configuration is given
+ if vpc_subnet_ids:
+ func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
+ 'SecurityGroupIds': vpc_security_group_ids}})
+
+ # Tag Function
+ if tags:
+ func_kwargs.update({'Tags': tags})
+
+ # Function would have been created if not check mode
+ if check_mode:
+ module.exit_json(changed=True)
+
+ # Finally try to create function
+ current_version = None
+ try:
+ response = client.create_function(aws_retry=True, **func_kwargs)
+ current_version = response['Version']
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to create function")
+
+ response = get_current_function(client, name, qualifier=current_version)
+ if not response:
+ module.fail_json(msg='Unable to get function information after creating')
+ response = format_response(response)
+ module.exit_json(changed=changed, **response)
+
+ # Delete existing Lambda function
+ if state == 'absent' and current_function:
+ try:
+ if not check_mode:
+ client.delete_function(FunctionName=name, aws_retry=True)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Trying to delete Lambda function")
+
+ module.exit_json(changed=changed)
+
+ # Function already absent, do nothing
+ elif state == 'absent':
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py
new file mode 100644
index 00000000..00781c72
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_alias
+version_added: 5.0.0
+short_description: Creates, updates or deletes AWS Lambda function aliases
+description:
+  - This module allows the management of AWS Lambda function aliases via the Ansible
+ framework. It is idempotent and supports "Check" mode. Use module M(amazon.aws.lambda) to manage the lambda function
+ itself and M(amazon.aws.lambda_event) to manage event source mappings.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+
+author:
+ - Pierre Jodouin (@pjodouin)
+ - Ryan Scott Brown (@ryansb)
+options:
+ function_name:
+ description:
+      - The name of the Lambda function.
+ required: true
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ name:
+ description:
+ - Name of the function alias.
+ required: true
+ aliases: ['alias_name']
+ type: str
+ description:
+ description:
+ - A short, user-defined function alias description.
+ type: str
+ function_version:
+ description:
+ - Version associated with the Lambda function alias.
+ A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
+ aliases: ['version']
+ type: int
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example to create a lambda function and publish a version
+- hosts: localhost
+ gather_facts: false
+ vars:
+ state: present
+ project_folder: /path/to/deployment/package
+ deployment_package: lambda.zip
+ account: 123456789012
+ production_version: 5
+ tasks:
+ - name: AWS Lambda Function
+ amazon.aws.lambda:
+ state: "{{ state | default('present') }}"
+ name: myLambdaFunction
+        description: lambda function description
+        zip_file: "{{ project_folder }}/{{ deployment_package }}"
+ runtime: python2.7
+ timeout: 5
+ handler: lambda.handler
+ memory_size: 128
+ role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
+
+ - name: Get information
+ amazon.aws.lambda_info:
+ name: myLambdaFunction
+ register: lambda_info
+ - name: show results
+ ansible.builtin.debug:
+ msg: "{{ lambda_info['lambda_facts'] }}"
+
+# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
+ - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ amazon.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: Dev
+ description: Development is $LATEST version
+
+# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
+ - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ amazon.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: QA
+ version: "{{ lambda_info.lambda_facts.Version }}"
+ description: "QA is version {{ lambda_info.lambda_facts.Version }}"
+ when: lambda_info.lambda_facts.Version != "$LATEST"
+
+# The Prod alias will have a fixed version based on a variable
+ - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
+ amazon.aws.lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
+ name: Prod
+ version: "{{ production_version }}"
+ description: "Production is version {{ production_version }}"
+'''
+
+RETURN = '''
+---
+alias_arn:
+  description: Full ARN of the function, including the alias.
+ returned: success
+ type: str
+ sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
+description:
+  description: A short description of the alias.
+ returned: success
+ type: str
+ sample: The development stage for my hot new app
+function_version:
+  description: The qualifier that the alias refers to.
+ returned: success
+ type: str
+ sample: $LATEST
+name:
+  description: The name of the alias assigned.
+ returned: success
+ type: str
+ sample: dev
+revision_id:
+ description: A unique identifier that changes when you update the alias.
+ returned: success
+ type: str
+ sample: 12345678-1234-1234-1234-123456789abc
+'''
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def set_api_params(module, module_params):
+ """
+ Sets non-None module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in module_params:
+ module_param = module.params.get(param, None)
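+        # note that falsy values (e.g. an empty description) are skipped and not sent to the API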
+ if module_param:
+ api_params[param] = module_param
+
+ return snake_dict_to_camel_dict(api_params, capitalize_first=True)
+
+
+def validate_params(module):
+ """
+ Performs basic parameter validation.
+
+ :param module: AnsibleAWSModule reference
+ :return:
+ """
+
+ function_name = module.params['function_name']
+
+ # validate function name
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens and underscores.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
+ if module.params['function_version'] == 0:
+ module.params['function_version'] = '$LATEST'
+ else:
+ module.params['function_version'] = str(module.params['function_version'])
+
+ return
+
+
+def get_lambda_alias(module, client):
+ """
+ Returns the lambda function alias if it exists.
+
+ :param module: AnsibleAWSModule
+ :param client: (wrapped) boto3 lambda client
+ :return:
+ """
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+ # check if alias exists and get facts
+ try:
+ results = client.get_alias(aws_retry=True, **api_params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ results = None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Error retrieving function alias')
+
+ return results
+
+
+def lambda_alias(module, client):
+ """
+ Adds, updates or deletes lambda function aliases.
+
+ :param module: AnsibleAWSModule
+ :param client: (wrapped) boto3 lambda client
+ :return dict:
+ """
+ results = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ facts = get_lambda_alias(module, client)
+ if facts:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present':
+ snake_facts = camel_dict_to_snake_dict(facts)
+
+ # check if alias has changed -- only version and description can change
+ alias_params = ('function_version', 'description')
+ for param in alias_params:
+ if module.params.get(param) is None:
+ continue
+ if module.params.get(param) != snake_facts.get(param):
+ changed = True
+ break
+
+ if changed:
+ api_params = set_api_params(module, ('function_name', 'name'))
+ api_params.update(set_api_params(module, alias_params))
+
+ if not module.check_mode:
+ try:
+ results = client.update_alias(aws_retry=True, **api_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error updating function alias')
+
+ else:
+ # create new function alias
+ api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
+
+ try:
+ if not module.check_mode:
+ results = client.create_alias(aws_retry=True, **api_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error creating function alias')
+
+ else: # state = 'absent'
+ if current_state == 'present':
+ # delete the function
+ api_params = set_api_params(module, ('function_name', 'name'))
+
+ try:
+ if not module.check_mode:
+ results = client.delete_alias(aws_retry=True, **api_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error deleting function alias')
+
+ return dict(changed=changed, **dict(results or facts or {}))
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ function_name=dict(required=True),
+ name=dict(required=True, aliases=['alias_name']),
+ function_version=dict(type='int', required=False, default=0, aliases=['version']),
+ description=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[],
+ )
+
+ client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
+
+ validate_params(module)
+ results = lambda_alias(module, client)
+
+ module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
new file mode 100644
index 00000000..b136a87c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
@@ -0,0 +1,432 @@
+#!/usr/bin/python
+# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_event
+version_added: 5.0.0
+short_description: Creates, updates or deletes AWS Lambda function event mappings
+description:
+ - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
+ events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
+ AWS Lambda invokes the function.
+ It is idempotent and supports "Check" mode. Use module M(amazon.aws.lambda) to manage the lambda
+ function itself and M(amazon.aws.lambda_alias) to manage function aliases.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+
+author:
+ - Pierre Jodouin (@pjodouin)
+ - Ryan Brown (@ryansb)
+options:
+ lambda_function_arn:
+ description:
+ - The name or ARN of the lambda function.
+ required: true
+ aliases: ['function_name', 'function_arn']
+ type: str
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+ alias:
+ description:
+ - Name of the function alias.
+ - Mutually exclusive with I(version).
+ type: str
+ version:
+ description:
+ - Version of the Lambda function.
+ - Mutually exclusive with I(alias).
+ type: int
+ event_source:
+ description:
+ - Source of the event that triggers the lambda function.
+      - For DynamoDB and Kinesis events, select C(stream).
+      - For SQS queues, select C(sqs).
+ default: stream
+ choices: ['stream', 'sqs']
+ type: str
+ source_params:
+ description:
+ - Sub-parameters required for event source.
+ suboptions:
+ source_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
+ type: str
+ required: true
+ enabled:
+ description:
+          - Indicates whether AWS Lambda should begin polling or reading from the event source.
+ default: true
+ type: bool
+ batch_size:
+ description:
+ - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
+ default: 100
+ type: int
+ starting_position:
+ description:
+ - The position in the stream where AWS Lambda should start reading.
+ - Required when I(event_source=stream).
+          choices: [TRIM_HORIZON, LATEST]
+ type: str
+ required: true
+ type: dict
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Example that creates a lambda event notification for a DynamoDB stream
+- name: DynamoDB stream event mapping
+ amazon.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_name: "{{ function_name }}"
+ alias: Dev
+ source_params:
+ source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
+ enabled: True
+ batch_size: 100
+ starting_position: TRIM_HORIZON
+ register: event
+
+- name: Show source event
+ ansible.builtin.debug:
+ var: event.lambda_stream_events
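+
+# Example of an SQS queue event mapping; the queue ARN and batch size
+# below are illustrative placeholders
+- name: SQS queue event mapping
+  amazon.aws.lambda_event:
+    state: present
+    event_source: sqs
+    function_name: "{{ function_name }}"
+    source_params:
+      source_arn: arn:aws:sqs:us-east-1:123456789012:my-queue
+      enabled: true
+      batch_size: 10
+  register: event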
+'''
+
+RETURN = '''
+---
+lambda_stream_events:
+  description: List of dictionaries returned by the API describing stream event mappings.
+ returned: success
+ type: list
+'''
+
+import re
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+class AWSConnection:
+ """
+ Create the connection object and client objects as required.
+ """
+
+ def __init__(self, ansible_obj, resources, use_boto3=True):
+
+ try:
+ self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['lambda']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['lambda'].meta.region_name
+
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+ # set account ID
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='lambda'):
+ return self.resource_client[resource]
+
+
+def pc(key):
+ """
+    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def ordered_obj(obj):
+ """
+    Recursively orders dicts and lists so that two objects can be compared irrespective of element order.
+
+ :param obj:
+ :return:
+ """
+
+ if isinstance(obj, dict):
+ return sorted((k, ordered_obj(v)) for k, v in obj.items())
+ if isinstance(obj, list):
+ return sorted(ordered_obj(x) for x in obj)
+ else:
+ return obj
+
+
+def set_api_sub_params(params):
+ """
+ Sets module sub-parameters to those expected by the boto3 API.
+
+ :param params:
+ :return:
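+
+ Example (illustrative): {'batch_size': 100, 'enabled': None} becomes
+ {'BatchSize': 100}; sub-parameters with falsy values are dropped.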
+ """
+
+ api_params = dict()
+
+ for param in params.keys():
+ param_value = params.get(param, None)
+ if param_value:
+ api_params[pc(param)] = param_value
+
+ return api_params
+
+
+def validate_params(module, aws):
+ """
+ Performs basic parameter validation.
+
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ function_name = module.params['lambda_function_arn']
+
+ # validate function name
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+ msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
+ module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
+
+ # check if 'function_name' needs to be expanded in full ARN format
+ if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
+ function_name = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
+
+ qualifier = get_qualifier(module)
+ if qualifier:
+ function_arn = module.params['lambda_function_arn']
+ module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+ return
+
+
+def get_qualifier(module):
+ """
+ Returns the function qualifier as a version or alias or None.
+
+ :param module:
+ :return:
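+
+ Example (illustrative): version=3 yields '3' and alias='Dev' yields 'Dev';
+ validate_params() then appends the qualifier to the function ARN.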
+ """
+
+ qualifier = None
+ if module.params['version'] > 0:
+ qualifier = str(module.params['version'])
+ elif module.params['alias']:
+ qualifier = str(module.params['alias'])
+
+ return qualifier
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Lambda Event Handlers
+#
+# This section defines a lambda_event_X function where X is an AWS service capable of initiating
+# the execution of a Lambda function (pull only).
+#
+# ---------------------------------------------------------------------------------------------------
+
+def lambda_event_stream(module, aws):
+ """
+ Adds, updates or deletes lambda stream (DynamoDB, Kinesis) event notifications.
+ :param module:
+ :param aws:
+ :return:
+ """
+
+ client = aws.client('lambda')
+ facts = dict()
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+
+ # check if required sub-parameters are present and valid
+ source_params = module.params['source_params']
+
+ source_arn = source_params.get('source_arn')
+ if source_arn:
+ api_params.update(EventSourceArn=source_arn)
+ else:
+ module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
+
+ # check if optional sub-parameters are valid, if present
+ batch_size = source_params.get('batch_size')
+ if batch_size:
+ try:
+ source_params['batch_size'] = int(batch_size)
+ except ValueError:
+ module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
+
+ # optional boolean value needs special treatment as not present does not imply False
+ source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
+
+ # check if event mapping exist
+ try:
+ facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
+ if facts:
+ current_state = 'present'
+ except ClientError as e:
+ module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
+
+ if state == 'present':
+ if current_state == 'absent':
+
+ starting_position = source_params.get('starting_position')
+ if starting_position:
+ api_params.update(StartingPosition=starting_position)
+ elif module.params.get('event_source') == 'sqs':
+ # starting position is not required for SQS
+ pass
+ else:
+ module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
+
+ if source_arn:
+ api_params.update(Enabled=source_param_enabled)
+ if source_params.get('batch_size'):
+ api_params.update(BatchSize=source_params.get('batch_size'))
+
+ try:
+ if not module.check_mode:
+ facts = client.create_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
+
+ else:
+ # current_state is 'present'
+ api_params = dict(FunctionName=module.params['lambda_function_arn'])
+ current_mapping = facts[0]
+ api_params.update(UUID=current_mapping['UUID'])
+ mapping_changed = False
+
+ # check if anything changed
+ if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
+ api_params.update(BatchSize=source_params['batch_size'])
+ mapping_changed = True
+
+ if source_param_enabled is not None:
+ if source_param_enabled:
+ if current_mapping['State'] not in ('Enabled', 'Enabling'):
+ api_params.update(Enabled=True)
+ mapping_changed = True
+ else:
+ if current_mapping['State'] not in ('Disabled', 'Disabling'):
+ api_params.update(Enabled=False)
+ mapping_changed = True
+
+ if mapping_changed:
+ try:
+ if not module.check_mode:
+ facts = client.update_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
+
+ else:
+ if current_state == 'present':
+ # remove the stream event mapping
+ api_params = dict(UUID=facts[0]['UUID'])
+
+ try:
+ if not module.check_mode:
+ facts = client.delete_event_source_mapping(**api_params)
+ changed = True
+ except (ClientError, ParamValidationError, MissingParametersError) as e:
+ module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
+
+ return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
+
+
+def main():
+ """Produce a list of function suffixes which handle lambda events."""
+ source_choices = ["stream", "sqs"]
+
+ argument_spec = dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']),
+ event_source=dict(required=False, default="stream", choices=source_choices),
+ source_params=dict(type='dict', required=True),
+ alias=dict(required=False, default=None),
+ version=dict(type='int', required=False, default=0),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['alias', 'version']],
+ required_together=[],
+ )
+
+ aws = AWSConnection(module, ['lambda'])
+
+ validate_params(module, aws)
+
+ if module.params['event_source'].lower() in ('stream', 'sqs'):
+ results = lambda_event_stream(module, aws)
+ else:
+ module.fail_json(msg='Please select `stream` or `sqs` as the event type')
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py
new file mode 100644
index 00000000..68fff52b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_execute
+version_added: 5.0.0
+short_description: Execute an AWS Lambda function
+description:
+ - This module executes AWS Lambda functions, allowing synchronous and asynchronous
+ invocation.
+ - Prior to release 5.0.0 this module was called C(community.aws.execute_lambda).
+ The usage did not change.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+author:
+ - "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
+notes:
+ - Async invocation will always return an empty C(output) key.
+ - Synchronous invocation may result in a function timeout, resulting in an
+ empty C(output) key.
+options:
+ name:
+ description:
+ - The name of the function to be invoked. This can only be used for
+ invocations within the calling account. To invoke a function in another
+ account, use I(function_arn) to specify the full ARN.
+ type: str
+ function_arn:
+ description:
+ - The ARN of the function to be invoked.
+ type: str
+ tail_log:
+ description:
+ - If I(tail_log=true), the result of the task will include the last 4 KB
+ of the CloudWatch log for the function execution. Log tailing only
+ works if you use synchronous invocation I(wait=true). This is usually
+ used for development or testing Lambdas.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Whether to wait for the function results or not. If I(wait=false)
+ the task will not return any results. To wait for the Lambda function
+ to complete, set I(wait=true) and the result will be available in the
+ I(output) key.
+ type: bool
+ default: true
+ dry_run:
+ description:
+ - Do not *actually* invoke the function. A C(DryRun) call will check that
+ the caller has permissions to call the function, especially for
+ checking cross-account permissions.
+ type: bool
+ default: false
+ version_qualifier:
+ description:
+ - Which version/alias of the function to run. This defaults to the
+ C(LATEST) revision, but can be set to any existing version or alias.
+ See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
+ for details.
+ type: str
+ payload:
+ description:
+ - A dictionary in any form to be provided as input to the Lambda function.
+ default: {}
+ type: dict
+'''
+
+EXAMPLES = '''
+- amazon.aws.lambda_execute:
+ name: test-function
+ # the payload is automatically serialized and sent to the function
+ payload:
+ foo: bar
+ value: 8
+ register: response
+
+# Test that you have sufficient permissions to execute a Lambda function in
+# another account
+- amazon.aws.lambda_execute:
+ function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
+ dry_run: true
+
+- amazon.aws.lambda_execute:
+ name: test-function
+ payload:
+ foo: bar
+ value: 8
+ wait: true
+ tail_log: true
+ register: response
+ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda
+
+# Pass the Lambda event payload as a json file.
+- amazon.aws.lambda_execute:
+ name: test-function
+ payload: "{{ lookup('file','lambda_event.json') }}"
+ register: response
+
+- amazon.aws.lambda_execute:
+ name: test-function
+ version_qualifier: PRODUCTION
+'''
+
+RETURN = '''
+result:
+ description: Resulting data structure from a successful task execution.
+ returned: success
+ type: dict
+ contains:
+ output:
+ description: Function output if wait=true and the function returns a value
+ returned: success
+ type: dict
+ sample: "{ 'output': 'something' }"
+ logs:
+ description: The last 4KB of the function logs. Only provided if I(tail_log) is C(true)
+ type: str
+ returned: when I(tail_log=true)
+ status:
+ description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
+ type: int
+ sample: 200
+ returned: always
+'''
+
+import base64
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ function_arn=dict(),
+ wait=dict(default=True, type='bool'),
+ tail_log=dict(default=False, type='bool'),
+ dry_run=dict(default=False, type='bool'),
+ version_qualifier=dict(),
+ payload=dict(default={}, type='dict'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'function_arn'],
+ ],
+ required_one_of=[
+ ('name', 'function_arn')
+ ],
+ )
+
+ name = module.params.get('name')
+ function_arn = module.params.get('function_arn')
+ await_return = module.params.get('wait')
+ dry_run = module.params.get('dry_run')
+ tail_log = module.params.get('tail_log')
+ version_qualifier = module.params.get('version_qualifier')
+ payload = module.params.get('payload')
+
+ try:
+ client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invoke_params = {}
+
+ if await_return:
+ # await response
+ invoke_params['InvocationType'] = 'RequestResponse'
+ else:
+ # fire and forget
+ invoke_params['InvocationType'] = 'Event'
+ if dry_run or module.check_mode:
+ # dry_run overrides invocation type
+ invoke_params['InvocationType'] = 'DryRun'
+
+ if tail_log and await_return:
+ invoke_params['LogType'] = 'Tail'
+ elif tail_log and not await_return:
+ module.fail_json(msg="The `tail_log` parameter is only available if "
+ "the invocation waits for the function to complete. "
+ "Set `wait` to true or turn off `tail_log`.")
+ else:
+ invoke_params['LogType'] = 'None'
+
+ if version_qualifier:
+ invoke_params['Qualifier'] = version_qualifier
+
+ if payload:
+ invoke_params['Payload'] = json.dumps(payload)
+
+ if function_arn:
+ invoke_params['FunctionName'] = function_arn
+ elif name:
+ invoke_params['FunctionName'] = name
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ # the waiter accepts either the function name or its full ARN
+ wait_for_lambda(client, module, name or function_arn)
+ response = client.invoke(**invoke_params, aws_retry=True)
+ except is_boto3_error_code('ResourceNotFoundException') as nfe:
+ module.fail_json_aws(nfe, msg="Could not find Lambda to execute. Make sure "
+ "the ARN is correct and your profile has "
+ "permissions to execute this function.")
+ except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except
+ module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error")
+ except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except
+ module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate")
+ except Exception as e:
+ module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function")
+
+ results = {
+ 'logs': '',
+ 'status': response['StatusCode'],
+ 'output': '',
+ }
+
+ if response.get('LogResult'):
+ try:
+ # logs are base64 encoded in the API response
+ results['logs'] = base64.b64decode(response.get('LogResult', ''))
+ except Exception as e:
+ module.fail_json_aws(e, msg="Failed while decoding logs")
+
+ if invoke_params['InvocationType'] == 'RequestResponse':
+ try:
+ results['output'] = json.loads(response['Payload'].read().decode('utf8'))
+ except Exception as e:
+ module.fail_json_aws(e, msg="Failed while decoding function return value")
+
+ if isinstance(results.get('output'), dict) and any(
+ [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
+ # AWS sends back stack traces and error messages when a function failed
+ # in a RequestResponse (synchronous) context.
+ template = ("Function executed, but there was an error in the Lambda function. "
+ "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
+ error_data = {
+ # format the stacktrace sent back as an array into a multiline string
+ 'trace': '\n'.join(
+ [' '.join([
+ str(x) for x in line # cast line numbers to strings
+ ]) for line in results.get('output', {}).get('stackTrace', [])]
+ ),
+ 'errmsg': results['output'].get('errorMessage'),
+ 'type': results['output'].get('errorType')
+ }
+ module.fail_json(msg=template.format(**error_data), result=results)
+
+ module.exit_json(changed=True, result=results)
+
+
+def wait_for_lambda(client, module, name):
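+ # Block until the function is Active and any in-progress update has
+ # completed, so the subsequent invoke() does not race a deployment.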
+ try:
+ client_active_waiter = client.get_waiter('function_active')
+ client_updated_waiter = client.get_waiter('function_updated')
+ client_active_waiter.wait(FunctionName=name)
+ client_updated_waiter.wait(FunctionName=name)
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
new file mode 100644
index 00000000..18946e4a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
@@ -0,0 +1,538 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_info
+version_added: 5.0.0
+short_description: Gathers AWS Lambda function details
+description:
+ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
+ - Use module M(amazon.aws.lambda) to manage the lambda function itself, M(amazon.aws.lambda_alias) to manage function aliases,
+ M(amazon.aws.lambda_event) to manage lambda event source mappings, and M(amazon.aws.lambda_policy) to manage policy statements.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ query:
+ description:
+ - Specifies the resource type for which to gather information.
+ - Defaults to C(all) when I(function_name) is specified.
+ - Defaults to C(config) when I(function_name) is NOT specified.
+ choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ]
+ type: str
+ function_name:
+ description:
+ - The name of the lambda function for which information is requested.
+ aliases: [ "function", "name"]
+ type: str
+ event_source_arn:
+ description:
+ - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
+ type: str
+author:
+ - Pierre Jodouin (@pjodouin)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+---
+# Simple example of listing all info for a function
+- name: List all for a specific function
+ amazon.aws.lambda_info:
+ query: all
+ function_name: myFunction
+ register: my_function_details
+
+# List all versions of a function
+- name: List function versions
+ amazon.aws.lambda_info:
+ query: versions
+ function_name: myFunction
+ register: my_function_versions
+
+# List all info for all functions
+- name: List all functions
+ amazon.aws.lambda_info:
+ query: all
+ register: output
+
+- name: show Lambda information
+ ansible.builtin.debug:
+ msg: "{{ output['function'] }}"
+'''
+
+RETURN = '''
+---
+function:
+ description:
+ - lambda function list.
+ - C(function) has been deprecated and will be removed in the next major release after 2025-01-01.
+ returned: success
+ type: dict
+function.TheName:
+ description:
+ - lambda function information, including event, mapping, and version information.
+ - C(function) has been deprecated and will be removed in the next major release after 2025-01-01.
+ returned: success
+ type: dict
+functions:
+ description: List of information for each lambda function matching the query.
+ returned: always
+ type: list
+ elements: dict
+ version_added: 4.1.0
+ version_added_collection: community.aws
+ contains:
+ aliases:
+ description: The aliases associated with the function.
+ returned: when C(query) is I(aliases) or I(all)
+ type: list
+ elements: str
+ architectures:
+ description: The architectures supported by the function.
+ returned: successful run where botocore >= 1.21.51
+ type: list
+ elements: str
+ sample: ['arm64']
+ code_sha256:
+ description: The SHA256 hash of the function's deployment package.
+ returned: success
+ type: str
+ sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU='
+ code_size:
+ description: The size of the function's deployment package in bytes.
+ returned: success
+ type: int
+ sample: 123
+ dead_letter_config:
+ description: The function's dead letter queue.
+ returned: when the function has a dead letter queue configured
+ type: dict
+ sample: { 'target_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' }
+ contains:
+ target_arn:
+ description: The ARN of an SQS queue or SNS topic.
+ returned: when the function has a dead letter queue configured
+ type: str
+ sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1
+ description:
+ description: The function's description.
+ returned: success
+ type: str
+ sample: 'My function'
+ environment:
+ description: The function's environment variables.
+ returned: when environment variables exist
+ type: dict
+ contains:
+ variables:
+ description: Environment variable key-value pairs.
+ returned: when environment variables exist
+ type: dict
+ sample: {'key': 'value'}
+ error:
+ description: Error message for environment variables that could not be applied.
+ returned: when there is an error applying environment variables
+ type: dict
+ contains:
+ error_code:
+ description: The error code.
+ returned: when there is an error applying environment variables
+ type: str
+ message:
+ description: The error message.
+ returned: when there is an error applying environment variables
+ type: str
+ function_arn:
+ description: The function's Amazon Resource Name (ARN).
+ returned: on success
+ type: str
+ sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1'
+ function_name:
+ description: The function's name.
+ returned: on success
+ type: str
+ sample: 'myFunction'
+ handler:
+ description: The function Lambda calls to begin executing your function.
+ returned: on success
+ type: str
+ sample: 'index.handler'
+ last_modified:
+ description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD).
+ returned: on success
+ type: str
+ sample: '2017-08-01T00:00:00.000+0000'
+ mappings:
+ description: List of configuration information for each event source mapping.
+ returned: when C(query) is I(all) or I(mappings)
+ type: list
+ elements: dict
+ contains:
+ uuid:
+ description: The AWS Lambda assigned opaque identifier for the mapping.
+ returned: on success
+ type: str
+ batch_size:
+ description: The largest number of records that AWS Lambda will retrieve from the event source at the time of invoking the function.
+ returned: on success
+ type: int
+ event_source_arn:
+ description: The ARN of the Amazon Kinesis or DynamoDB stream that is the source of events.
+ returned: on success
+ type: str
+ function_arn:
+ description: The Lambda function to invoke when AWS Lambda detects an event on the poll-based source.
+ returned: on success
+ type: str
+ last_modified:
+ description: The UTC time string indicating the last time the event mapping was updated.
+ returned: on success
+ type: str
+ last_processing_result:
+ description: The result of the last AWS Lambda invocation of your Lambda function.
+ returned: on success
+ type: str
+ state:
+ description: The state of the event source mapping.
+ returned: on success
+ type: str
+ state_transition_reason:
+ description: The reason the event source mapping is in its current state.
+ returned: on success
+ type: str
+ memory_size:
+ description: The memory allocated to the function.
+ returned: on success
+ type: int
+ sample: 128
+ policy:
+ description: The policy associated with the function.
+ returned: when C(query) is I(all) or I(policy)
+ type: dict
+ revision_id:
+ description: The latest updated revision of the function or alias.
+ returned: on success
+ type: str
+ sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c'
+ role:
+ description: The function's execution role.
+ returned: on success
+ type: str
+ sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution'
+ runtime:
+ description: The runtime environment for the Lambda function.
+ returned: on success
+ type: str
+ sample: 'nodejs6.10'
+ tracing_config:
+ description: The function's AWS X-Ray tracing configuration.
+ returned: on success
+ type: dict
+ sample: { 'mode': 'Active' }
+ contains:
+ mode:
+ description: The tracing mode.
+ returned: on success
+ type: str
+ sample: 'Active'
+ timeout:
+ description: The amount of time that Lambda allows a function to run before terminating it.
+ returned: on success
+ type: int
+ sample: 3
+ version:
+ description: The version of the Lambda function.
+ returned: on success
+ type: str
+ sample: '1'
+ versions:
+ description: List of Lambda function versions.
+ returned: when C(query) is I(all) or I(versions)
+ type: list
+ elements: dict
+ vpc_config:
+ description: The function's networking configuration.
+ returned: on success
+ type: dict
+ sample: {
+ 'security_group_ids': [],
+ 'subnet_ids': [],
+ 'vpc_id': '123'
+ }
+'''
+
+import json
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff()
+def _paginate(client, function, **params):
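+ # Drain a boto3 paginator for the named operation (e.g. 'list_functions',
+ # 'list_aliases') and merge all pages into a single result; the decorator
+ # retries throttled calls with jittered backoff.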
+ paginator = client.get_paginator(function)
+ return paginator.paginate(**params).build_full_result()
+
+
+def alias_details(client, module, function_name):
+ """
+ Returns list of aliases for a specified function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ try:
+ lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(aliases=[])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get aliases")
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def list_functions(client, module):
+ """
+ Returns queried facts for a specified function (or all functions).
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ """
+
+ function_name = module.params.get('function_name')
+ if function_name:
+ # Function name is specified - retrieve info on that function
+ function_names = [function_name]
+
+ else:
+ # Function name is not specified - retrieve all function names
+ all_function_info = _paginate(client, 'list_functions')['Functions']
+ function_names = [function_info['FunctionName'] for function_info in all_function_info]
+
+ query = module.params['query']
+ functions = []
+
+ # keep returning deprecated response (dict of dicts) until removed
+ all_facts = {}
+
+ for function_name in function_names:
+ function = {}
+
+ # query = 'config' returns info such as FunctionName, FunctionArn, Description, etc
+ # these details should be returned regardless of the query
+ function.update(config_details(client, module, function_name))
+
+ if query in ['all', 'aliases']:
+ function.update(alias_details(client, module, function_name))
+
+ if query in ['all', 'policy']:
+ function.update(policy_details(client, module, function_name))
+
+ if query in ['all', 'versions']:
+ function.update(version_details(client, module, function_name))
+
+ if query in ['all', 'mappings']:
+ function.update(mapping_details(client, module, function_name))
+
+ if query in ['all', 'tags']:
+ function.update(tags_details(client, module, function_name))
+
+ all_facts[function['function_name']] = function
+
+ # add current lambda to list of lambdas
+ functions.append(function)
+
+ # return info
+ module.exit_json(function=all_facts, functions=functions, changed=False)
+
+
+def config_details(client, module, function_name):
+ """
+ Returns configuration details for a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ try:
+ lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name))
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(function={})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def mapping_details(client, module, function_name):
+ """
+ Returns all lambda event source mappings.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+ params = dict()
+
+ params['FunctionName'] = function_name
+
+ if module.params.get('event_source_arn'):
+ params['EventSourceArn'] = module.params.get('event_source_arn')
+
+ try:
+ lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(mappings=[])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get source event mappings")
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def policy_details(client, module, function_name):
+ """
+ Returns policy attached to a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ try:
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy']))
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(policy={})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def version_details(client, module, function_name):
+ """
+ Returns all lambda function versions.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ try:
+ lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(versions=[])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def tags_details(client, module, function_name):
+ """
+ Returns tag details for a lambda function.
+
+ :param client: AWS API client reference (boto3)
+ :param module: Ansible module reference
+ :param function_name (str): Name of Lambda function to query
+ :return dict:
+ """
+
+ lambda_info = dict()
+
+ try:
+ lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {}))
+ except is_boto3_error_code('ResourceNotFoundException'):
+ lambda_info.update(function={})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name))
+
+ return camel_dict_to_snake_dict(lambda_info)
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+ argument_spec = dict(
+ function_name=dict(required=False, default=None, aliases=['function', 'name']),
+ query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None),
+ event_source_arn=dict(required=False, default=None),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[],
+ required_together=[]
+ )
+
+ # validate function_name if present
+ function_name = module.params['function_name']
+ if function_name:
+ if not re.search(r"^[\w\-:]+$", function_name):
+ module.fail_json(
+ msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+ # create default values for query if not specified.
+ # if function name exists, query should default to 'all'.
+ # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas.
+ if not module.params.get('query'):
+ if function_name:
+ module.params['query'] = 'all'
+ else:
+ module.params['query'] = 'config'
+
+ client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts
+ module.deprecate(
+ "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions',"
+ " which returns a list of dictionaries. Both keys are returned for now.",
+ date='2025-01-01',
+ collection_name='amazon.aws'
+ )
+
+ list_functions(client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py
new file mode 100644
index 00000000..38fbef32
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_policy
+version_added: 5.0.0
+short_description: Creates, updates or deletes AWS Lambda policy statements
+description:
+ - This module allows the management of AWS Lambda policy statements.
+ - It is idempotent and supports check mode.
+ - Use module M(amazon.aws.lambda) to manage the lambda function itself, M(amazon.aws.lambda_alias) to manage function aliases,
+ M(amazon.aws.lambda_event) to manage event source mappings such as Kinesis streams, M(amazon.aws.lambda_execute) to execute a
+ lambda function and M(amazon.aws.lambda_info) to gather information relating to one or more lambda functions.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+ - Pierre Jodouin (@pjodouin)
+ - Michael De La Rue (@mikedlr)
+options:
+ function_name:
+ description:
+ - "Name of the Lambda function whose resource policy you are updating by adding a new permission."
+ - "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
+ - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail) ). AWS Lambda also allows you to"
+ - "specify partial ARN (for example, C(account-id:Thumbnail) ). Note that the length constraint applies only to the"
+ - "ARN. If you specify only the function name, it is limited to 64 character in length."
+ required: true
+ aliases: ['lambda_function_arn', 'function_arn']
+ type: str
+
+ state:
+ description:
+ - Describes the desired state.
+ default: "present"
+ choices: ["present", "absent"]
+ type: str
+
+ alias:
+ description:
+ - Name of the function alias. Mutually exclusive with I(version).
+ type: str
+
+ version:
+ description:
+ - Version of the Lambda function. Mutually exclusive with I(alias).
+ type: int
+
+ statement_id:
+ description:
+ - A unique statement identifier.
+ required: true
+ aliases: ['sid']
+ type: str
+
+ action:
+ description:
+ - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
+ lambda: followed by the API name (see Operations). For example, C(lambda:CreateFunction). You can use the wildcard
+ (C(lambda:*)) to grant permission for all AWS Lambda actions."
+ required: true
+ type: str
+
+ principal:
+ description:
+ - "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
+ you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
+ any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom
+ application in another AWS account to push events to AWS Lambda by invoking your function."
+ required: true
+ type: str
+
+ source_arn:
+ description:
+ - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
+ field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
+ the specified bucket can invoke the function.
+ type: str
+
+ source_account:
+ description:
+ - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket,
+ then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
+ specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
+ account created the bucket). You can also use this condition to specify all sources (that is, you don't
+ specify the I(source_arn)) owned by a specific account.
+ type: str
+
+ event_source_token:
+ description:
+ - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account).
+ type: str
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+
+- name: Lambda S3 event notification
+ amazon.aws.lambda_policy:
+ state: present
+ function_name: functionName
+ alias: Dev
+ statement_id: lambda-s3-myBucket-create-data-log
+ action: lambda:InvokeFunction
+ principal: s3.amazonaws.com
+ source_arn: arn:aws:s3:::bucketName
+ source_account: 123456789012
+ register: lambda_policy_action
+
+- name: show results
+ ansible.builtin.debug:
+ var: lambda_policy_action
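+
+# A minimal sketch of removing the same statement again; note that the module
+# still requires action and principal even when state is absent.
+- name: Remove the S3 permission statement
+ amazon.aws.lambda_policy:
+ state: absent
+ function_name: functionName
+ statement_id: lambda-s3-myBucket-create-data-log
+ action: lambda:InvokeFunction
+ principal: s3.amazonaws.com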
+'''
+
+RETURN = '''
+---
+lambda_policy_action:
+ description: describes what action was taken
+ returned: success
+ type: str
+'''
+
+import json
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def pc(key):
+ """
+ Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+ :param key:
+ :return:
+ """
+
+ return "".join([token.capitalize() for token in key.split('_')])
+
+
+def policy_equal(module, current_statement):
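+ # Compare the flattened existing statement against the module parameters;
+ # any field that differs means the statement must be removed and re-added.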
+ for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
+ if module.params.get(param) != current_statement.get(param):
+ return False
+
+ return True
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+
+ :param module:
+ :param module_params:
+ :return:
+ """
+
+ api_params = dict()
+
+ for param in module_params:
+ module_param = module.params.get(param)
+ if module_param is not None:
+ api_params[pc(param)] = module_param
+
+ return api_params
+
+
+def validate_params(module):
+ """
+ Performs parameter validation beyond the module framework's validation.
+
+ :param module:
+ :return:
+ """
+
+ function_name = module.params['function_name']
+
+ # validate function name
+ if function_name.startswith('arn:'):
+ if not re.search(r'^[\w\-:]+$', function_name):
+ module.fail_json(
+ msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
+ )
+ if len(function_name) > 140:
+ module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name))
+ else:
+ if not re.search(r'^[\w\-]+$', function_name):
+ module.fail_json(
+ msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
+ function_name)
+ )
+ if len(function_name) > 64:
+ module.fail_json(
+ msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+
+def get_qualifier(module):
+ """
+ Returns the function qualifier as a version or alias or None.
+
+ :param module:
+ :return:
+ """
+
+ if module.params.get('version') is not None:
+ return to_native(module.params['version'])
+ elif module.params['alias']:
+ return to_native(module.params['alias'])
+
+ return None
+
+
+def extract_statement(policy, sid):
+ """return flattened single policy statement from a policy
+
+ If a policy statement is present in the policy extract it and
+ return it in a flattened form. Otherwise return an empty
+ dictionary.
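+
+ Example (illustrative): a statement granting S3 permission to invoke the
+ function flattens to {'action': 'lambda:InvokeFunction',
+ 'principal': 's3.amazonaws.com', 'source_arn': 'arn:aws:s3:::myBucket'}.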
+ """
+ if 'Statement' not in policy:
+ return {}
+ policy_statement = {}
+ # Now that we have the policy, check if required permission statement is present and flatten to
+ # simple dictionary if found.
+ for statement in policy['Statement']:
+ if statement['Sid'] == sid:
+ policy_statement['action'] = statement['Action']
+ try:
+ policy_statement['principal'] = statement['Principal']['Service']
+ except KeyError:
+ pass
+ try:
+ policy_statement['principal'] = statement['Principal']['AWS']
+ except KeyError:
+ pass
+ try:
+ policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
+ except KeyError:
+ pass
+ try:
+ policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
+ except KeyError:
+ pass
+ try:
+ policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
+ except KeyError:
+ pass
+ break
+
+ return policy_statement
+
+
+def get_policy_statement(module, client):
+ """Checks that policy exists and if so, that statement ID is present or absent.
+
+ :param module:
+ :param client:
+ :return:
+ """
+ sid = module.params['statement_id']
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', ))
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ policy_results = None
+ # check if function policy exists
+ try:
+ policy_results = client.get_policy(**api_params)
+ except is_boto3_error_code('ResourceNotFoundException'):
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="retrieving function policy")
+
+ # get_policy returns a JSON string so must convert to dict before reassigning to its key
+ policy = json.loads(policy_results.get('Policy', '{}'))
+ return extract_statement(policy, sid)
+
+
+def add_policy_permission(module, client):
+ """
+ Adds a permission statement to the policy.
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ params = (
+ 'function_name',
+ 'statement_id',
+ 'action',
+ 'principal',
+ 'source_arn',
+ 'source_account',
+ 'event_source_token')
+ api_params = set_api_params(module, params)
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ if not module.check_mode:
+ try:
+ client.add_permission(**api_params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="adding permission to policy")
+ changed = True
+
+ return changed
+
+
+def remove_policy_permission(module, client):
+ """
+ Removes a permission statement from the policy.
+
+ :param module:
+ :param client:
+ :return:
+ """
+
+ changed = False
+
+ # set API parameters
+ api_params = set_api_params(module, ('function_name', 'statement_id'))
+ qualifier = get_qualifier(module)
+ if qualifier:
+ api_params.update(Qualifier=qualifier)
+
+ try:
+ if not module.check_mode:
+ client.remove_permission(**api_params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="removing permission from policy")
+
+ return changed
+
+
+def manage_state(module, lambda_client):
+ changed = False
+ current_state = 'absent'
+ state = module.params['state']
+ action_taken = 'none'
+
+ # check if the policy exists
+ current_policy_statement = get_policy_statement(module, lambda_client)
+ if current_policy_statement:
+ current_state = 'present'
+
+ if state == 'present':
+ if current_state == 'present' and not policy_equal(module, current_policy_statement):
+ remove_policy_permission(module, lambda_client)
+ changed = add_policy_permission(module, lambda_client)
+ action_taken = 'updated'
+ if current_state != 'present':
+ changed = add_policy_permission(module, lambda_client)
+ action_taken = 'added'
+ elif current_state == 'present':
+ # remove the policy statement
+ changed = remove_policy_permission(module, lambda_client)
+ action_taken = 'deleted'
+
+ return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
+
+
+def setup_module_object():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
+ statement_id=dict(required=True, aliases=['sid']),
+ alias=dict(),
+ version=dict(type='int'),
+ action=dict(required=True, ),
+ principal=dict(required=True, ),
+ source_arn=dict(),
+ source_account=dict(),
+ event_source_token=dict(no_log=False),
+ )
+
+ return AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['alias', 'version'],
+ ['event_source_token', 'source_arn'],
+ ['event_source_token', 'source_account']],
+ )
+
+
+def main():
+ """
+ Main entry point.
+
+ :return dict: ansible facts
+ """
+
+ module = setup_module_object()
+ client = module.client('lambda')
+ validate_params(module)
+ results = manage_state(module, client)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
new file mode 100644
index 00000000..77d06d25
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
@@ -0,0 +1,1024 @@
+#!/usr/bin/python
+# Copyright (c) 2022 Ansible Project
+# Copyright (c) 2022 Alina Buzachis (@alinabuzachis)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_cluster
+version_added: 5.0.0
+short_description: Create, modify, and delete RDS clusters
+description:
+ - Create, modify, and delete RDS clusters.
+ - This module was originally added to C(community.aws) in release 3.2.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+author:
+ - Sloane Hertel (@s-hertel)
+ - Alina Buzachis (@alinabuzachis)
+options:
+ # General module options
+ state:
+ description: Whether the cluster should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ creation_source:
+ description: Which source to use if creating from a template (an existing cluster, S3 bucket, or snapshot).
+ choices: ['snapshot', 's3', 'cluster']
+ type: str
+ force_update_password:
+ description:
+ - Set to C(true) to update your cluster password with I(master_user_password).
+ - Since comparing passwords to determine whether an update is needed is not possible, this is set to C(false) by default to allow idempotence.
+ type: bool
+ default: false
+ promote:
+ description: Set to C(true) to promote a read replica cluster.
+ type: bool
+ default: false
+ purge_cloudwatch_logs_exports:
+ description:
+ - Whether or not to disable CloudWatch logs enabled for the DB cluster that are not provided in I(enable_cloudwatch_logs_exports).
+ Set I(enable_cloudwatch_logs_exports) to an empty list to disable all.
+ type: bool
+ default: true
+ purge_security_groups:
+ description:
+ - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the cluster.
+ - Can be applied to I(vpc_security_group_ids).
+ type: bool
+ default: true
+ wait:
+ description: Whether to wait for the cluster to be available or deleted.
+ type: bool
+ default: true
+ # Options that have a corresponding boto3 parameter
+ apply_immediately:
+ description:
+ - A value that specifies whether modifying a cluster with I(new_db_cluster_identifier) and I(master_user_password)
+ should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes
+ are applied during the next maintenance window.
+ type: bool
+ default: false
+ availability_zones:
+ description:
+ - A list of EC2 Availability Zones that instances in the DB cluster can be created in.
+ May be used when creating a cluster or when restoring from S3 or a snapshot.
+ aliases:
+ - zones
+ - az
+ type: list
+ elements: str
+ backtrack_to:
+ description:
+ - The timestamp of the time to backtrack the DB cluster to in ISO 8601 format, such as "2017-07-08T18:00Z".
+ type: str
+ backtrack_window:
+ description:
+ - The target backtrack window, in seconds. To disable backtracking, set this value to C(0).
+ - If specified, this value must be set to a number from C(0) to C(259,200) (72 hours).
+ default: 0
+ type: int
+ backup_retention_period:
+ description:
+ - The number of days for which automated backups are retained (must be within C(1) to C(35)).
+ May be used when creating a new cluster, when restoring from S3, or when modifying a cluster.
+ type: int
+ default: 1
+ character_set_name:
+ description:
+ - The character set to associate with the DB cluster.
+ type: str
+ database_name:
+ description:
+ - The name for your database. If a name is not provided, Amazon RDS will not create a database.
+ aliases:
+ - db_name
+ type: str
+ db_cluster_identifier:
+ description:
+ - The DB cluster (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
+ hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
+ aliases:
+ - cluster_id
+ - id
+ - cluster_name
+ type: str
+ required: true
+ db_cluster_parameter_group_name:
+ description:
+ - The name of the DB cluster parameter group to associate with this DB cluster.
+ If this argument is omitted when creating a cluster, the default DB cluster parameter group for the specified DB engine and version is used.
+ type: str
+ db_subnet_group_name:
+ description:
+ - A DB subnet group to associate with this DB cluster if not using the default.
+ type: str
+ enable_cloudwatch_logs_exports:
+ description:
+ - A list of log types that need to be enabled for exporting to CloudWatch Logs.
+ - Engine aurora-mysql supports C(audit), C(error), C(general) and C(slowquery).
+ - Engine aurora-postgresql supports C(postgresql).
+ type: list
+ elements: str
+ deletion_protection:
+ description:
+ - A value that indicates whether the DB cluster has deletion protection enabled.
+ The database can't be deleted when deletion protection is enabled.
+ By default, deletion protection is disabled.
+ type: bool
+ global_cluster_identifier:
+ description:
+ - The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.
+ type: str
+ enable_http_endpoint:
+ description:
+ - A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster.
+ By default, the HTTP endpoint is disabled.
+ type: bool
+ copy_tags_to_snapshot:
+ description:
+ - Indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster.
+ The default is not to copy them.
+ type: bool
+ domain:
+ description:
+ - The Active Directory directory ID to create the DB cluster in.
+ type: str
+ domain_iam_role_name:
+ description:
+ - Specify the name of the IAM role to be used when making API calls to the Directory Service.
+ type: str
+ enable_global_write_forwarding:
+ description:
+ - A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database.
+ By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.
+ - This value can be only set on Aurora DB clusters that are members of an Aurora global database.
+ type: bool
+ enable_iam_database_authentication:
+ description:
+ - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
+ If this option is omitted when creating the cluster, Amazon RDS sets this to C(false).
+ type: bool
+ engine:
+ description:
+ - The name of the database engine to be used for this DB cluster. This is required to create a cluster.
+ choices:
+ - aurora
+ - aurora-mysql
+ - aurora-postgresql
+ type: str
+ engine_version:
+ description:
+ - The version number of the database engine to use.
+ - For Aurora MySQL that could be C(5.6.10a), C(5.7.12).
+ - Aurora PostgreSQL example, C(9.6.3).
+ type: str
+ final_snapshot_identifier:
+ description:
+ - The DB cluster snapshot identifier of the new DB cluster snapshot created when I(skip_final_snapshot=false).
+ type: str
+ force_backtrack:
+ description:
+ - A boolean to indicate if the DB cluster should be forced to backtrack when binary logging is enabled.
+ Otherwise, an error occurs when binary logging is enabled.
+ type: bool
+ kms_key_id:
+ description:
+ - The AWS KMS key identifier (the ARN, unless you are creating a cluster in the same account that owns the
+ KMS key, in which case the KMS key alias may be used).
+ - If I(replication_source_identifier) specifies an encrypted source, Amazon RDS will use the key used to encrypt the source.
+ - If I(storage_encrypted=true) and I(replication_source_identifier) is not provided, the default encryption key is used.
+ type: str
+ master_user_password:
+ description:
+ - An 8-41 character password for the master database user.
+ - The password can contain any printable ASCII character except C(/), C("), or C(@).
+ - To modify the password use I(force_update_password). Use I(apply_immediately) to change
+ the password immediately, otherwise it is updated during the next maintenance window.
+ aliases:
+ - password
+ type: str
+ master_username:
+ description:
+ - The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter.
+ aliases:
+ - username
+ type: str
+ new_db_cluster_identifier:
+ description:
+ - The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB cluster.
+ - The identifier must contain from 1 to 63 letters, numbers, or hyphens and the first character must be a
+ letter and may not end in a hyphen or contain consecutive hyphens.
+ - Use I(apply_immediately) to rename immediately, otherwise it is updated during the next maintenance window.
+ aliases:
+ - new_cluster_id
+ - new_id
+ - new_cluster_name
+ type: str
+ option_group_name:
+ description:
+ - The option group to associate with the DB cluster.
+ type: str
+ port:
+ description:
+ - The port number on which the instances in the DB cluster accept connections. If not specified, Amazon RDS
+        defaults this to C(3306) if the I(engine) is C(aurora) and C(5432) if the I(engine) is C(aurora-postgresql).
+ type: int
+ preferred_backup_window:
+ description:
+ - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+ enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+ I(preferred_maintenance_window).
+ aliases:
+ - backup_window
+ type: str
+ preferred_maintenance_window:
+ description:
+ - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+ be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+ aliases:
+ - maintenance_window
+ type: str
+ replication_source_identifier:
+ description:
+ - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.
+ aliases:
+ - replication_src_id
+ type: str
+ restore_to_time:
+ description:
+ - The UTC date and time to restore the DB cluster to. Must be in the format "2015-03-07T23:45:00Z".
+      - If this is not provided while restoring a cluster, I(use_latest_restorable_time) must be set.
+        May not be specified if I(restore_type=copy-on-write).
+ type: str
+ restore_type:
+ description:
+ - The type of restore to be performed. If not provided, Amazon RDS uses full-copy.
+ choices:
+ - full-copy
+ - copy-on-write
+ type: str
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role to associate with the Aurora DB cluster, for example
+ "arn:aws:iam::123456789012:role/AuroraAccessRole"
+ type: str
+ s3_bucket_name:
+ description:
+ - The name of the Amazon S3 bucket that contains the data used to create the Amazon Aurora DB cluster.
+ type: str
+ s3_ingestion_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
+ the Amazon S3 bucket on your behalf.
+ type: str
+ s3_prefix:
+ description:
+ - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster.
+      - If you do not specify an I(s3_prefix) value, the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket.
+ type: str
+ skip_final_snapshot:
+ description:
+ - Whether a final DB cluster snapshot is created before the DB cluster is deleted.
+ - If this is C(false), I(final_snapshot_identifier) must be provided.
+ type: bool
+ default: false
+ snapshot_identifier:
+ description:
+ - The identifier for the DB snapshot or DB cluster snapshot to restore from.
+ - You can use either the name or the ARN to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.
+ type: str
+ source_db_cluster_identifier:
+ description:
+ - The identifier of the source DB cluster from which to restore.
+ type: str
+ source_engine:
+ description:
+ - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
+ choices:
+ - mysql
+ type: str
+ source_engine_version:
+ description:
+ - The version of the database that the backup files were created from.
+ type: str
+ source_region:
+ description:
+ - The ID of the region that contains the source for the DB cluster.
+ type: str
+ storage_encrypted:
+ description:
+ - Whether the DB cluster is encrypted.
+ type: bool
+ use_earliest_time_on_point_in_time_unavailable:
+ description:
+ - If I(backtrack_to) is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to
+ the earliest possible backtrack time. Otherwise, an error occurs.
+ type: bool
+ use_latest_restorable_time:
+ description:
+ - Whether to restore the DB cluster to the latest restorable backup time. Only one of I(use_latest_restorable_time)
+ and I(restore_to_time) may be provided.
+ type: bool
+ vpc_security_group_ids:
+ description:
+ - A list of EC2 VPC security groups to associate with the DB cluster.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Create minimal aurora cluster in default VPC and default subnet group
+ amazon.aws.rds_cluster:
+ cluster_id: "{{ cluster_id }}"
+ engine: "aurora"
+ password: "{{ password }}"
+ username: "{{ username }}"
+
+- name: Add a new security group without purge
+ amazon.aws.rds_cluster:
+ id: "{{ cluster_id }}"
+ state: present
+ vpc_security_group_ids:
+ - sg-0be17ba10c9286b0b
+ purge_security_groups: false
+
+- name: Modify password
+ amazon.aws.rds_cluster:
+ id: "{{ cluster_id }}"
+ state: present
+ password: "{{ new_password }}"
+ force_update_password: true
+ apply_immediately: true
+
+- name: Rename the cluster
+ amazon.aws.rds_cluster:
+ engine: aurora
+ password: "{{ password }}"
+ username: "{{ username }}"
+ cluster_id: "cluster-{{ resource_prefix }}"
+ new_cluster_id: "cluster-{{ resource_prefix }}-renamed"
+ apply_immediately: true
+
+- name: Delete aurora cluster without creating a final snapshot
+ amazon.aws.rds_cluster:
+ engine: aurora
+ password: "{{ password }}"
+ username: "{{ username }}"
+ cluster_id: "{{ cluster_id }}"
+    skip_final_snapshot: true
+ tags:
+ Name: "cluster-{{ resource_prefix }}"
+ Created_By: "Ansible_rds_cluster_integration_test"
+ state: absent
+
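+# A sketch of restoring from S3 (illustrative values; the bucket, prefix, role
+# ARN, and engine version below are placeholders, not values from this repository):
+- name: Restore cluster from backup files stored in S3
+  amazon.aws.rds_cluster:
+    creation_source: s3
+    engine: aurora
+    cluster_id: "cluster-{{ resource_prefix }}-s3"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    source_engine: mysql
+    source_engine_version: "5.6.40"
+    s3_bucket_name: my-backup-bucket
+    s3_prefix: backups/
+    s3_ingestion_role_arn: "arn:aws:iam::123456789012:role/S3IngestionRole"
+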
+- name: Restore cluster from source snapshot
+ amazon.aws.rds_cluster:
+ engine: aurora
+ password: "{{ password }}"
+ username: "{{ username }}"
+ cluster_id: "cluster-{{ resource_prefix }}-restored"
+ snapshot_identifier: "cluster-{{ resource_prefix }}-snapshot"
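+
+# A sketch of a point-in-time restore (the timestamp is an illustrative
+# placeholder; only one of restore_to_time and use_latest_restorable_time
+# may be provided):
+- name: Restore cluster to a point in time from a source cluster
+  amazon.aws.rds_cluster:
+    creation_source: cluster
+    cluster_id: "cluster-{{ resource_prefix }}-pit"
+    source_db_cluster_identifier: "{{ cluster_id }}"
+    restore_to_time: "2015-03-07T23:45:00Z"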
+'''
+
+RETURN = r'''
+activity_stream_status:
+ description: The status of the database activity stream.
+ returned: always
+ type: str
+ sample: stopped
+allocated_storage:
+ description:
+    - The allocated storage size in gigabytes. Since Aurora storage size is not fixed, this is
+      always 1 for Aurora database engines.
+ returned: always
+ type: int
+ sample: 1
+associated_roles:
+ description:
+ - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated
+ with the DB cluster. Each dictionary contains the role_arn and the status of the role.
+ returned: always
+ type: list
+ sample: []
+availability_zones:
+ description: The list of availability zones that instances in the DB cluster can be created in.
+ returned: always
+ type: list
+ sample:
+ - us-east-1c
+ - us-east-1a
+ - us-east-1e
+backup_retention_period:
+ description: The number of days for which automatic DB snapshots are retained.
+ returned: always
+ type: int
+ sample: 1
+changed:
+  description: Whether the RDS cluster has changed.
+ returned: always
+ type: bool
+ sample: true
+cluster_create_time:
+ description: The time in UTC when the DB cluster was created.
+ returned: always
+ type: str
+ sample: '2018-06-29T14:08:58.491000+00:00'
+copy_tags_to_snapshot:
+ description:
+ - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster.
+ returned: always
+ type: bool
+ sample: false
+cross_account_clone:
+ description:
+ - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account.
+ returned: always
+ type: bool
+ sample: false
+db_cluster_arn:
+ description: The Amazon Resource Name (ARN) for the DB cluster.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo
+db_cluster_identifier:
+ description: The lowercase user-supplied DB cluster identifier.
+ returned: always
+ type: str
+ sample: rds-cluster-demo
+db_cluster_members:
+ description:
+ - A list of dictionaries containing information about the instances in the cluster.
+ Each dictionary contains the db_instance_identifier, is_cluster_writer (bool),
+ db_cluster_parameter_group_status, and promotion_tier (int).
+ returned: always
+ type: list
+ sample: []
+db_cluster_parameter_group:
+ description: The parameter group associated with the DB cluster.
+ returned: always
+ type: str
+ sample: default.aurora5.6
+db_cluster_resource_id:
+ description: The AWS Region-unique, immutable identifier for the DB cluster.
+ returned: always
+ type: str
+ sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU
+db_subnet_group:
+ description: The name of the subnet group associated with the DB Cluster.
+ returned: always
+ type: str
+ sample: default
+deletion_protection:
+ description:
+ - Indicates if the DB cluster has deletion protection enabled.
+ The database can't be deleted when deletion protection is enabled.
+ returned: always
+ type: bool
+ sample: false
+domain_memberships:
+ description:
+ - The Active Directory Domain membership records associated with the DB cluster.
+ returned: always
+ type: list
+ sample: []
+earliest_restorable_time:
+ description: The earliest time to which a database can be restored with point-in-time restore.
+ returned: always
+ type: str
+ sample: '2018-06-29T14:09:34.797000+00:00'
+endpoint:
+ description: The connection endpoint for the primary instance of the DB cluster.
+ returned: always
+ type: str
+ sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com
+engine:
+ description: The database engine of the DB cluster.
+ returned: always
+ type: str
+ sample: aurora
+engine_mode:
+ description: The DB engine mode of the DB cluster.
+ returned: always
+ type: str
+ sample: provisioned
+engine_version:
+ description: The database engine version.
+ returned: always
+ type: str
+ sample: 5.6.10a
+hosted_zone_id:
+ description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+ returned: always
+ type: str
+ sample: Z2R2ITUGPM61AM
+http_endpoint_enabled:
+ description:
+ - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled.
+ returned: always
+ type: bool
+ sample: false
+iam_database_authentication_enabled:
+ description: Whether IAM accounts may be mapped to database accounts.
+ returned: always
+ type: bool
+ sample: false
+latest_restorable_time:
+ description: The latest time to which a database can be restored with point-in-time restore.
+ returned: always
+ type: str
+ sample: '2018-06-29T14:09:34.797000+00:00'
+master_username:
+ description: The master username for the DB cluster.
+ returned: always
+ type: str
+ sample: username
+multi_az:
+ description: Whether the DB cluster has instances in multiple availability zones.
+ returned: always
+ type: bool
+ sample: false
+port:
+ description: The port that the database engine is listening on.
+ returned: always
+ type: int
+ sample: 3306
+preferred_backup_window:
+  description: The daily time range (in UTC) during which automated backups are created if automated backups are enabled.
+ returned: always
+ type: str
+ sample: 10:18-10:48
+preferred_maintenance_window:
+ description: The UTC weekly time range during which system maintenance can occur.
+ returned: always
+ type: str
+ sample: tue:03:23-tue:03:53
+read_replica_identifiers:
+ description: A list of read replica ID strings associated with the DB cluster.
+ returned: always
+ type: list
+ sample: []
+reader_endpoint:
+ description: The reader endpoint for the DB cluster.
+ returned: always
+ type: str
+ sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com
+status:
+ description: The status of the DB cluster.
+ returned: always
+ type: str
+ sample: available
+storage_encrypted:
+ description: Whether the DB cluster is storage encrypted.
+ returned: always
+ type: bool
+ sample: false
+tag_list:
+ description: A list of tags consisting of key-value pairs.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "key": "Created_By",
+ "value": "Ansible_rds_cluster_integration_test"
+ }
+ ]
+tags:
+ description: A dictionary of key value pairs.
+ returned: always
+ type: dict
+ sample: {
+ "Name": "rds-cluster-demo"
+ }
+vpc_security_groups:
+ description: A list of the DB cluster's security groups and their status.
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: Status of the security group.
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: Security group of the cluster.
+ returned: always
+ type: str
+ sample: sg-12345678
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+
+
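+# Return the description of the matching DB cluster, or an empty dict when the
+# cluster does not exist. Calls are retried with jittered backoff.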
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_db_clusters(**params):
+ try:
+ paginator = client.get_paginator('describe_db_clusters')
+ return paginator.paginate(**params).build_full_result()['DBClusters'][0]
+ except is_boto3_error_code('DBClusterNotFoundFault'):
+ return {}
+
+
+def get_add_role_options(params_dict, cluster):
+ current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])]
+ role = params_dict['RoleArn']
+ if role is not None and role not in current_role_arns:
+ return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']}
+ return {}
+
+
+def get_backtrack_options(params_dict):
+ options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable']
+ if params_dict['BacktrackTo'] is not None:
+ options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None)
+ if 'ForceBacktrack' in params_dict:
+ options['Force'] = params_dict['ForceBacktrack']
+ return options
+ return {}
+
+
+def get_create_options(params_dict):
+ options = [
+ 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow',
+ 'CharacterSetName', 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName',
+ 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'KmsKeyId',
+ 'Engine', 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'MasterUsername',
+ 'OptionGroupName', 'Port', 'ReplicationSourceIdentifier', 'SourceRegion', 'StorageEncrypted',
+ 'Tags', 'VpcSecurityGroupIds', 'EngineMode', 'ScalingConfiguration', 'DeletionProtection',
+ 'EnableHttpEndpoint', 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName',
+ 'EnableGlobalWriteForwarding',
+ ]
+
+ return dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
+
+
+def get_modify_options(params_dict, force_update_password):
+ options = [
+ 'ApplyImmediately', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow',
+ 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'EnableIAMDatabaseAuthentication',
+ 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'NewDBClusterIdentifier',
+        'OptionGroupName', 'Port', 'VpcSecurityGroupIds',
+ 'CloudwatchLogsExportConfiguration', 'DeletionProtection', 'EnableHttpEndpoint',
+ 'CopyTagsToSnapshot', 'EnableGlobalWriteForwarding', 'Domain', 'DomainIAMRoleName',
+ ]
+ modify_options = dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
+ if not force_update_password:
+ modify_options.pop('MasterUserPassword', None)
+ return modify_options
+
+
+def get_delete_options(params_dict):
+ options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot']
+ return dict((k, params_dict[k]) for k in options if params_dict[k] is not None)
+
+
+def get_restore_s3_options(params_dict):
+ options = [
+ 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName',
+ 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName',
+ 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion',
+ 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port',
+ 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn',
+ 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags',
+ 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot',
+ 'Domain', 'DomainIAMRoleName',
+ ]
+
+ return dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
+
+
+def get_restore_snapshot_options(params_dict):
+ options = [
+ 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName',
+ 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication',
+ 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier',
+ 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection',
+ 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName',
+ ]
+ return dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
+
+
+def get_restore_cluster_options(params_dict):
+ options = [
+ 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports',
+ 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime',
+ 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime',
+ 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain',
+ 'DomainIAMRoleName',
+ ]
+ return dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
+
+
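+# Map the module's state and creation_source parameters to the boto3 client
+# method to call and the name of the helper that builds that method's options.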
+def get_rds_method_attribute_name(cluster):
+ state = module.params['state']
+ creation_source = module.params['creation_source']
+ method_name = None
+ method_options_name = None
+
+ if state == 'absent':
+ if cluster and cluster['Status'] not in ['deleting', 'deleted']:
+ method_name = 'delete_db_cluster'
+ method_options_name = 'get_delete_options'
+ else:
+ if cluster:
+ method_name = 'modify_db_cluster'
+ method_options_name = 'get_modify_options'
+ elif creation_source == 'snapshot':
+ method_name = 'restore_db_cluster_from_snapshot'
+ method_options_name = 'get_restore_snapshot_options'
+ elif creation_source == 's3':
+ method_name = 'restore_db_cluster_from_s3'
+ method_options_name = 'get_restore_s3_options'
+ elif creation_source == 'cluster':
+ method_name = 'restore_db_cluster_to_point_in_time'
+ method_options_name = 'get_restore_cluster_options'
+ else:
+ method_name = 'create_db_cluster'
+ method_options_name = 'get_create_options'
+
+ return method_name, method_options_name
+
+
+def add_role(params):
+ if not module.check_mode:
+ try:
+ client.add_role_to_db_cluster(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}")
+ wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available')
+
+
+def backtrack_cluster(params):
+ if not module.check_mode:
+ try:
+ client.backtrack_db_cluster(**params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg=f"Unable to backtrack cluster {params['DBClusterIdentifier']}")
+ wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available')
+
+
+def get_cluster(db_cluster_id):
+ try:
+ return _describe_db_clusters(DBClusterIdentifier=db_cluster_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe DB clusters")
+
+
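+# Compare the requested modify options against the current cluster state and
+# return only the options that would actually change the cluster.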
+def changing_cluster_options(modify_params, current_cluster):
+ changing_params = {}
+ apply_immediately = modify_params.pop('ApplyImmediately')
+ db_cluster_id = modify_params.pop('DBClusterIdentifier')
+
+ enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None)
+ if enable_cloudwatch_logs_export is not None:
+ desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []}
+ provided_cloudwatch_logs = set(enable_cloudwatch_logs_export)
+ current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports'])
+
+ desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export))
+ if module.params['purge_cloudwatch_logs_exports']:
+ desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs))
+ changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration
+
+ password = modify_params.pop('MasterUserPassword', None)
+ if password:
+ changing_params['MasterUserPassword'] = password
+
+ new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None)
+ if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']:
+ changing_params['NewDBClusterIdentifier'] = new_cluster_id
+
+ option_group = modify_params.pop('OptionGroupName', None)
+ if (
+ option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']]
+ ):
+ changing_params['OptionGroupName'] = option_group
+
+ vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None)
+ if vpc_sgs:
+ desired_vpc_sgs = []
+ provided_vpc_sgs = set(vpc_sgs)
+ current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']])
+ if module.params['purge_security_groups']:
+ desired_vpc_sgs = vpc_sgs
+ else:
+ if provided_vpc_sgs - current_vpc_sgs:
+ desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs)
+
+ if desired_vpc_sgs:
+ changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs
+
+ desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None)
+ if desired_db_cluster_parameter_group:
+ if desired_db_cluster_parameter_group != current_cluster["DBClusterParameterGroup"]:
+ changing_params["DBClusterParameterGroupName"] = desired_db_cluster_parameter_group
+
+ for param in modify_params:
+ if modify_params[param] != current_cluster[param]:
+ changing_params[param] = modify_params[param]
+
+ if changing_params:
+ changing_params['DBClusterIdentifier'] = db_cluster_id
+ if apply_immediately is not None:
+ changing_params['ApplyImmediately'] = apply_immediately
+
+ return changing_params
+
+
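+# Create or restore the cluster when it does not exist; otherwise backtrack or
+# modify it, reconcile tags, attach any requested IAM role, and promote a read
+# replica if requested.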
+def ensure_present(cluster, parameters, method_name, method_options_name):
+ changed = False
+
+ if not cluster:
+ if parameters.get('Tags') is not None:
+ parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
+ call_method(client, module, method_name, eval(method_options_name)(parameters))
+ changed = True
+ else:
+ if get_backtrack_options(parameters):
+            backtrack_cluster(get_backtrack_options(parameters))
+ changed = True
+ else:
+ modifiable_options = eval(method_options_name)(parameters,
+ force_update_password=module.params['force_update_password'])
+ modify_options = changing_cluster_options(modifiable_options, cluster)
+ if modify_options:
+ call_method(client, module, method_name, modify_options)
+ changed = True
+ if module.params['tags'] is not None:
+ existing_tags = get_tags(client, module, cluster['DBClusterArn'])
+ changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'],
+ module.params['purge_tags'])
+
+ add_role_params = get_add_role_options(parameters, cluster)
+ if add_role_params:
+        add_role(add_role_params)
+ changed = True
+
+ if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'):
+ call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']})
+ changed = True
+
+ return changed
+
+
+def main():
+ global module
+ global client
+
+ arg_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']),
+ force_update_password=dict(type='bool', default=False),
+ promote=dict(type='bool', default=False),
+ purge_cloudwatch_logs_exports=dict(type='bool', default=True),
+ purge_tags=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ purge_security_groups=dict(type='bool', default=True),
+ )
+
+ parameter_options = dict(
+ apply_immediately=dict(type='bool', default=False),
+ availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']),
+ backtrack_to=dict(),
+ backtrack_window=dict(type='int'),
+ backup_retention_period=dict(type='int', default=1),
+ character_set_name=dict(),
+ database_name=dict(aliases=['db_name']),
+ db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']),
+ db_cluster_parameter_group_name=dict(),
+ db_subnet_group_name=dict(),
+ enable_cloudwatch_logs_exports=dict(type='list', elements='str'),
+ deletion_protection=dict(type='bool'),
+ global_cluster_identifier=dict(),
+ enable_http_endpoint=dict(type='bool'),
+ copy_tags_to_snapshot=dict(type='bool'),
+ domain=dict(),
+ domain_iam_role_name=dict(),
+ enable_global_write_forwarding=dict(type='bool'),
+ enable_iam_database_authentication=dict(type='bool'),
+ engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql"]),
+ engine_version=dict(),
+ final_snapshot_identifier=dict(),
+ force_backtrack=dict(type='bool'),
+ kms_key_id=dict(),
+ master_user_password=dict(aliases=['password'], no_log=True),
+ master_username=dict(aliases=['username']),
+ new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']),
+ option_group_name=dict(),
+ port=dict(type='int'),
+ preferred_backup_window=dict(aliases=['backup_window']),
+ preferred_maintenance_window=dict(aliases=['maintenance_window']),
+ replication_source_identifier=dict(aliases=['replication_src_id']),
+ restore_to_time=dict(),
+ restore_type=dict(choices=['full-copy', 'copy-on-write']),
+ role_arn=dict(),
+ s3_bucket_name=dict(),
+ s3_ingestion_role_arn=dict(),
+ s3_prefix=dict(),
+ skip_final_snapshot=dict(type='bool', default=False),
+ snapshot_identifier=dict(),
+ source_db_cluster_identifier=dict(),
+ source_engine=dict(choices=['mysql']),
+ source_engine_version=dict(),
+ source_region=dict(),
+ storage_encrypted=dict(type='bool'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ use_earliest_time_on_point_in_time_unavailable=dict(type='bool'),
+ use_latest_restorable_time=dict(type='bool'),
+ vpc_security_group_ids=dict(type='list', elements='str'),
+ )
+ arg_spec.update(parameter_options)
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ required_if=[
+ ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
+ ('creation_source', 's3', (
+ 's3_bucket_name', 'engine', 'master_username', 'master_user_password',
+ 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+ ],
+ mutually_exclusive=[
+ ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'),
+ ('use_latest_restorable_time', 'restore_to_time'),
+ ],
+ supports_check_mode=True
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+
+ try:
+ client = module.client('rds', retry_decorator=retry_decorator)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+ module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower()
+ cluster = get_cluster(module.params['db_cluster_identifier'])
+
+ if module.params['new_db_cluster_identifier']:
+ module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower()
+
+        if get_cluster(module.params['new_db_cluster_identifier']):
+            module.fail_json(msg=f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists")
+        if not cluster:
+            module.fail_json(msg=f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist")
+
+ if (
+ module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and
+ module.params['final_snapshot_identifier'] is None
+ ):
+ module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier')
+
+ parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+
+ changed = False
+ method_name, method_options_name = get_rds_method_attribute_name(cluster)
+
+ if method_name:
+ if method_name == 'delete_db_cluster':
+ call_method(client, module, method_name, eval(method_options_name)(parameters))
+ changed = True
+ else:
+ changed |= ensure_present(cluster, parameters, method_name, method_options_name)
+
+ if not module.check_mode and module.params['new_db_cluster_identifier'] and module.params['apply_immediately']:
+ cluster_id = module.params['new_db_cluster_identifier']
+ else:
+ cluster_id = module.params['db_cluster_identifier']
+
+ result = camel_dict_to_snake_dict(get_cluster(cluster_id))
+
+ if result:
+ result['tags'] = get_tags(client, module, result['db_cluster_arn'])
+
+ module.exit_json(changed=changed, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py
new file mode 100644
index 00000000..3135a4ce
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# Copyright (c) 2022 Ansible Project
+# Copyright (c) 2022 Alina Buzachis (@alinabuzachis)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: rds_cluster_info
+version_added: 5.0.0
+short_description: Obtain information about one or more RDS clusters
+description:
+ - Obtain information about one or more RDS clusters.
+ - This module was originally added to C(community.aws) in release 3.2.0.
+options:
+ db_cluster_identifier:
+ description:
+ - The user-supplied DB cluster identifier.
+ - If this parameter is specified, information from only the specific DB cluster is returned.
+ aliases:
+ - cluster_id
+ - id
+ - cluster_name
+ type: str
+ filters:
+ description:
+ - A filter that specifies one or more DB clusters to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html).
+ type: dict
+author:
+ - Alina Buzachis (@alinabuzachis)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Get info of all existing DB clusters
+ amazon.aws.rds_cluster_info:
+ register: _result_cluster_info
+
+- name: Get info on a specific DB cluster
+ amazon.aws.rds_cluster_info:
+ cluster_id: "{{ cluster_id }}"
+ register: _result_cluster_info
+
+- name: Get info on all DB clusters with a specific engine
+  amazon.aws.rds_cluster_info:
+    filters:
+      engine: "aurora"
+  register: _result_cluster_info
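+
+# A sketch using a DescribeDBClusters API filter (the db-cluster-id filter
+# accepts DB cluster identifiers or ARNs; the value here is illustrative):
+- name: Get info on DB clusters matching a db-cluster-id filter
+  amazon.aws.rds_cluster_info:
+    filters:
+      db-cluster-id: "{{ cluster_id }}"
+  register: _result_cluster_info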
+'''
+
+RETURN = r'''
+clusters:
+ description: List of RDS clusters.
+ returned: always
+ type: list
+ contains:
+ activity_stream_status:
+ description: The status of the database activity stream.
+ type: str
+ sample: stopped
+ allocated_storage:
+ description:
+        - The allocated storage size in gigabytes. Since Aurora storage size is not fixed, this is
+          always 1 for Aurora database engines.
+ type: int
+ sample: 1
+ associated_roles:
+ description:
+ - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated
+ with the DB cluster. Each dictionary contains the role_arn and the status of the role.
+ type: list
+ sample: []
+ availability_zones:
+ description: The list of availability zones that instances in the DB cluster can be created in.
+ type: list
+ sample:
+ - us-east-1c
+ - us-east-1a
+ - us-east-1e
+ backup_retention_period:
+ description: The number of days for which automatic DB snapshots are retained.
+ type: int
+ sample: 1
+ cluster_create_time:
+ description: The time in UTC when the DB cluster was created.
+ type: str
+ sample: '2018-06-29T14:08:58.491000+00:00'
+ copy_tags_to_snapshot:
+ description:
+ - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster.
+ type: bool
+ sample: false
+ cross_account_clone:
+ description:
+ - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account.
+ type: bool
+ sample: false
+ db_cluster_arn:
+ description: The Amazon Resource Name (ARN) for the DB cluster.
+ type: str
+ sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo
+ db_cluster_identifier:
+ description: The lowercase user-supplied DB cluster identifier.
+ type: str
+ sample: rds-cluster-demo
+ db_cluster_members:
+ description:
+ - A list of dictionaries containing information about the instances in the cluster.
+ Each dictionary contains the I(db_instance_identifier), I(is_cluster_writer) (bool),
+ I(db_cluster_parameter_group_status), and I(promotion_tier) (int).
+ type: list
+ sample: []
+ db_cluster_parameter_group:
+ description: The parameter group associated with the DB cluster.
+ type: str
+ sample: default.aurora5.6
+ db_cluster_resource_id:
+ description: The AWS Region-unique, immutable identifier for the DB cluster.
+ type: str
+ sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU
+ db_subnet_group:
+ description: The name of the subnet group associated with the DB Cluster.
+ type: str
+ sample: default
+ deletion_protection:
+ description:
+ - Indicates if the DB cluster has deletion protection enabled.
+ The database can't be deleted when deletion protection is enabled.
+ type: bool
+ sample: false
+ domain_memberships:
+ description:
+ - The Active Directory Domain membership records associated with the DB cluster.
+ type: list
+ sample: []
+ earliest_restorable_time:
+ description: The earliest time to which a database can be restored with point-in-time restore.
+ type: str
+ sample: '2018-06-29T14:09:34.797000+00:00'
+ endpoint:
+ description: The connection endpoint for the primary instance of the DB cluster.
+ type: str
+ sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com
+ engine:
+ description: The database engine of the DB cluster.
+ type: str
+ sample: aurora
+ engine_mode:
+ description: The DB engine mode of the DB cluster.
+ type: str
+ sample: provisioned
+ engine_version:
+ description: The database engine version.
+ type: str
+ sample: 5.6.10a
+ hosted_zone_id:
+ description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+ type: str
+ sample: Z2R2ITUGPM61AM
+ http_endpoint_enabled:
+ description:
+ - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled.
+ type: bool
+ sample: false
+ iam_database_authentication_enabled:
+ description: Whether IAM accounts may be mapped to database accounts.
+ type: bool
+ sample: false
+ latest_restorable_time:
+ description: The latest time to which a database can be restored with point-in-time restore.
+ type: str
+ sample: '2018-06-29T14:09:34.797000+00:00'
+ master_username:
+ description: The master username for the DB cluster.
+ type: str
+ sample: username
+ multi_az:
+ description: Whether the DB cluster has instances in multiple availability zones.
+ type: bool
+ sample: false
+ port:
+ description: The port that the database engine is listening on.
+ type: int
+ sample: 3306
+ preferred_backup_window:
+      description: The daily time range (in UTC) during which automated backups are created if automated backups are enabled.
+ type: str
+ sample: 10:18-10:48
+ preferred_maintenance_window:
+ description: The UTC weekly time range during which system maintenance can occur.
+ type: str
+ sample: tue:03:23-tue:03:53
+ read_replica_identifiers:
+ description: A list of read replica ID strings associated with the DB cluster.
+ type: list
+ sample: []
+ reader_endpoint:
+ description: The reader endpoint for the DB cluster.
+ type: str
+ sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com
+ status:
+ description: The status of the DB cluster.
+ type: str
+ sample: available
+ storage_encrypted:
+ description: Whether the DB cluster is storage encrypted.
+ type: bool
+ sample: false
+ tag_list:
+ description: A list of tags consisting of key-value pairs.
+ type: list
+ elements: dict
+ sample: [
+ {
+ "key": "Created_By",
+ "value": "Ansible_rds_cluster_integration_test"
+ }
+ ]
+ tags:
+ description: A dictionary of key value pairs.
+ type: dict
+ sample: {
+ "Name": "rds-cluster-demo"
+ }
+ vpc_security_groups:
+ description: A list of the DB cluster's security groups and their status.
+ type: complex
+ contains:
+ status:
+ description: Status of the security group.
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: Security group of the cluster.
+ type: str
+ sample: sg-12345678
+'''
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_db_clusters(client, **params):
+ try:
+ paginator = client.get_paginator('describe_db_clusters')
+ return paginator.paginate(**params).build_full_result()['DBClusters']
+ except is_boto3_error_code('DBClusterNotFoundFault'):
+ return []
+
+
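+# Describe the requested clusters (optionally narrowed by identifier and/or API
+# filters), attach their tags, and return the result converted to snake_case.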
+def cluster_info(client, module):
+ cluster_id = module.params.get('db_cluster_identifier')
+ filters = module.params.get('filters')
+
+ params = dict()
+ if cluster_id:
+ params['DBClusterIdentifier'] = cluster_id
+ if filters:
+ params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ result = _describe_db_clusters(client, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get RDS cluster information.")
+
+ for cluster in result:
+ cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn'])
+
+ return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result])
+
+
+def main():
+ argument_spec = dict(
+ db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']),
+ filters=dict(type='dict'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+ module.exit_json(**cluster_info(client, module))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py
new file mode 100644
index 00000000..ff712c43
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# Copyright (c) 2014 Ansible Project
+# Copyright (c) 2021 Alina Buzachis (@alinabuzachis)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_cluster_snapshot
+version_added: 5.0.0
+short_description: Manage Amazon RDS snapshots of DB clusters
+description:
+ - Create, modify and delete RDS snapshots of DB clusters.
+ - This module was originally added to C(community.aws) in release 4.0.0.
+options:
+ state:
+ description:
+ - Specify the desired state of the snapshot.
+ default: present
+ choices: [ 'present', 'absent']
+ type: str
+ db_cluster_snapshot_identifier:
+ description:
+ - The identifier of the DB cluster snapshot.
+ required: true
+ aliases:
+ - snapshot_id
+ - id
+ - snapshot_name
+ type: str
+ db_cluster_identifier:
+ description:
+ - The identifier of the DB cluster to create a snapshot for.
+ - Required when I(state=present).
+ aliases:
+ - cluster_id
+ - cluster_name
+ type: str
+ source_db_cluster_snapshot_identifier:
+ description:
+ - The identifier of the DB cluster snapshot to copy.
+ - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier.
+      - If the source snapshot is in a different AWS region than the copy, specify the snapshot's ARN.
+ aliases:
+ - source_id
+ - source_snapshot_id
+ type: str
+ source_region:
+ description:
+ - The region that contains the snapshot to be copied.
+ type: str
+ copy_tags:
+ description:
+ - Whether to copy all tags from I(source_db_cluster_snapshot_identifier) to I(db_cluster_snapshot_identifier).
+ type: bool
+    default: false
+ wait:
+ description:
+ - Whether or not to wait for snapshot creation or deletion.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+notes:
+  - Retrieving the information about a specific DB cluster or listing the DB cluster snapshots for a specific DB cluster
+    can be done using M(community.aws.rds_snapshot_info).
+author:
+ - Alina Buzachis (@alinabuzachis)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create a DB cluster snapshot
+ amazon.aws.rds_cluster_snapshot:
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: new-cluster-snapshot
+
+- name: Delete a DB cluster snapshot
+ amazon.aws.rds_cluster_snapshot:
+ db_cluster_snapshot_identifier: new-cluster-snapshot
+ state: absent
+
+- name: Copy snapshot from a different region and copy its tags
+ amazon.aws.rds_cluster_snapshot:
+ id: new-database-snapshot-copy
+ region: us-east-1
+    source_id: "{{ snapshot.db_cluster_snapshot_arn }}"
+ source_region: us-east-2
+ copy_tags: true
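+
+# A sketch combining tags with the waiter options (the tag and timeout values
+# are illustrative placeholders):
+- name: Create a tagged DB cluster snapshot and wait until it is available
+  amazon.aws.rds_cluster_snapshot:
+    db_cluster_identifier: "{{ cluster_id }}"
+    db_cluster_snapshot_identifier: nightly-cluster-snapshot
+    tags:
+      Purpose: nightly-backup
+    wait: true
+    wait_timeout: 600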
+'''
+
+RETURN = r'''
+availability_zone:
+ description: Availability zone of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: us-west-2a
+db_cluster_snapshot_identifier:
+ description: Specifies the identifier for the DB cluster snapshot.
+ returned: always
+ type: str
+ sample: ansible-test-16638696-test-snapshot
+db_cluster_identifier:
+ description: Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.
+ returned: always
+ type: str
+ sample: ansible-test-16638696
+snapshot_create_time:
+ description: Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).
+ returned: always
+ type: str
+ sample: '2019-06-15T10:46:23.776000+00:00'
+engine:
+ description: Specifies the name of the database engine for this DB cluster snapshot.
+ returned: always
+ type: str
+ sample: "aurora"
+engine_mode:
+ description: Provides the engine mode of the database engine for this DB cluster snapshot.
+ returned: always
+ type: str
+ sample: "5.6.mysql_aurora.1.22.5"
+allocated_storage:
+ description: Specifies the allocated storage size in gibibytes (GiB).
+ returned: always
+ type: int
+ sample: 20
+status:
+ description: Specifies the status of this DB cluster snapshot.
+ returned: always
+ type: str
+ sample: available
+port:
+ description: Port on which the database is listening.
+ returned: always
+ type: int
+ sample: 3306
+vpc_id:
+ description: ID of the VPC in which the DB lives.
+ returned: always
+ type: str
+ sample: vpc-09ff232e222710ae0
+cluster_create_time:
+ description: Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).
+ returned: always
+ type: str
+ sample: '2019-06-15T10:15:56.221000+00:00'
+master_username:
+ description: Provides the master username for this DB cluster snapshot.
+ returned: always
+ type: str
+ sample: test
+engine_version:
+ description: Version of the cluster from which the snapshot was created.
+ returned: always
+ type: str
+ sample: "5.6.mysql_aurora.1.22.5"
+license_model:
+ description: Provides the license model information for this DB cluster snapshot.
+ returned: always
+ type: str
+ sample: general-public-license
+snapshot_type:
+ description: How the snapshot was created (always manual for this module!).
+ returned: always
+ type: str
+ sample: manual
+percent_progress:
+ description: Specifies the percentage of the estimated data that has been transferred.
+ returned: always
+ type: int
+ sample: 100
+storage_encrypted:
+ description: Specifies whether the DB cluster snapshot is encrypted.
+ returned: always
+ type: bool
+ sample: false
+kms_key_id:
+  description: The Amazon Web Services KMS key identifier (the key ARN, key ID, alias ARN, or alias name) for the KMS key.
+ returned: always
+ type: str
+db_cluster_snapshot_arn:
+ description: Amazon Resource Name for the snapshot.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot
+source_db_cluster_snapshot_arn:
+  description: If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot; otherwise, null.
+ returned: always
+ type: str
+ sample: null
+iam_database_authentication_enabled:
+ description: Whether IAM database authentication is enabled.
+ returned: always
+ type: bool
+ sample: false
+tag_list:
+ description: A list of tags.
+ returned: always
+ type: list
+ sample: []
+tags:
+ description: Tags applied to the snapshot.
+ returned: always
+ type: complex
+ contains: {}
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+def get_snapshot(snapshot_id):
+ try:
+ snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0]
+ snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"])
+ except is_boto3_error_code("DBClusterSnapshotNotFound"):
+ return {}
+ except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
+ return snapshot
+
+
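+# Build the keyword arguments for the given boto3 method: fail early when a
+# required parameter is missing and drop anything the method does not accept.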
+def get_parameters(parameters, method_name):
+ if method_name == 'copy_db_cluster_snapshot':
+ parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier']
+
+ required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+ if any(parameters.get(k) is None for k in required_options):
+        module.fail_json(msg='To {0}, the following parameters are required: {1}'.format(
+            get_rds_method_attribute(method_name, module).operation_description, required_options))
+ options = get_boto3_client_method_parameters(client, method_name)
+ parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+ return parameters
+
+
+def ensure_snapshot_absent():
+ snapshot_name = module.params.get("db_cluster_snapshot_identifier")
+ params = {"DBClusterSnapshotIdentifier": snapshot_name}
+ changed = False
+
+ snapshot = get_snapshot(snapshot_name)
+ if not snapshot:
+ module.exit_json(changed=changed)
+ elif snapshot and snapshot["Status"] != "deleting":
+ snapshot, changed = call_method(client, module, "delete_db_cluster_snapshot", params)
+
+ module.exit_json(changed=changed)
+
+
+def copy_snapshot(params):
+ changed = False
+ snapshot_id = module.params.get('db_cluster_snapshot_identifier')
+ snapshot = get_snapshot(snapshot_id)
+
+ if not snapshot:
+ method_params = get_parameters(params, 'copy_db_cluster_snapshot')
+ if method_params.get('Tags'):
+ method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
+ result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params)
+
+ return changed
+
+
+def ensure_snapshot_present(params):
+ source_id = module.params.get('source_db_cluster_snapshot_identifier')
+ snapshot_name = module.params.get("db_cluster_snapshot_identifier")
+ changed = False
+
+ snapshot = get_snapshot(snapshot_name)
+
+ # Copy snapshot
+ if source_id:
+ changed |= copy_snapshot(params)
+
+ # Create snapshot
+ elif not snapshot:
+ changed |= create_snapshot(params)
+
+    # Snapshot exists and we're not creating a copy - modify existing snapshot
+ else:
+ changed |= modify_snapshot()
+
+ snapshot = get_snapshot(snapshot_name)
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']))
+
+
+def create_snapshot(params):
+ method_params = get_parameters(params, 'create_db_cluster_snapshot')
+ if method_params.get('Tags'):
+ method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
+ snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params)
+
+ return changed
+
+
+def modify_snapshot():
+ # TODO - add other modifications aside from purely tags
+ changed = False
+ snapshot_id = module.params.get('db_cluster_snapshot_identifier')
+ snapshot = get_snapshot(snapshot_id)
+
+ if module.params.get('tags'):
+ changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags'])
+
+ return changed
+
+
+def main():
+ global client
+ global module
+
+ argument_spec = dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True),
+ db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']),
+ source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ copy_tags=dict(type='bool', default=False),
+ source_region=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ try:
+ client = module.client('rds', retry_decorator=retry_decorator)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ state = module.params.get("state")
+
+ if state == "absent":
+ ensure_snapshot_absent()
+ elif state == "present":
+ params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec))
+ ensure_snapshot_present(params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
new file mode 100644
index 00000000..facb02ad
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
@@ -0,0 +1,1476 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_instance
+version_added: 5.0.0
+short_description: Manage RDS instances
+description:
+ - Create, modify, and delete RDS instances.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+author:
+ - Sloane Hertel (@s-hertel)
+
+options:
+ # General module options
+ state:
+ description:
+      - Whether the DB instance should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state
+        and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state
+        (running if creating the DB instance).
+      - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note that rebooting the instance
+        is not idempotent.
+ choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted']
+ default: 'present'
+ type: str
+ creation_source:
+ description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot).
+ choices: ['snapshot', 's3', 'instance']
+ type: str
+ force_update_password:
+ description:
+      - Set to C(true) to update your instance password with I(master_user_password). Since comparing passwords to determine
+        whether it needs to be updated is not possible, this is set to C(false) by default to allow idempotence.
+ type: bool
+ default: False
+ purge_cloudwatch_logs_exports:
+    description: Set to C(false) to retain any enabled CloudWatch logs that aren't specified in the task and are associated with the instance.
+ type: bool
+ default: True
+ read_replica:
+ description:
+      - Set to C(false) to promote a read replica or C(true) to create one. When creating a read replica, I(creation_source) should
+        be set to C(instance) or left unset. I(source_db_instance_identifier) must be provided with this option.
+ type: bool
+ wait:
+ description:
+      - Whether to wait for the instance to be available, stopped, or deleted. At a later time an I(wait_timeout) option may be added.
+        Following each API call to create/modify/delete the instance, a waiter is used with a 60 second delay 30 times until the instance reaches
+        the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry, which helps stabilize the task if the
+        instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting).
+        If setting this to C(false), task retries and delays may help your playbook better handle timeouts for major modifications.
+ type: bool
+ default: True
+
+ # Options that have a corresponding boto3 parameter
+ allocated_storage:
+ description:
+ - The amount of storage (in gibibytes) to allocate for the DB instance.
+ type: int
+ allow_major_version_upgrade:
+ description:
+ - Whether to allow major version upgrades.
+ type: bool
+ apply_immediately:
+ description:
+ - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password)
+ should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes
+ are applied during the next maintenance window.
+ type: bool
+ default: False
+ auto_minor_version_upgrade:
+ description:
+ - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window.
+ type: bool
+ availability_zone:
+ description:
+      - The EC2 Availability Zone that the DB instance is created in.
+        May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
+ aliases:
+ - az
+ - zone
+ type: str
+ backup_retention_period:
+ description:
+ - The number of days for which automated backups are retained.
+ - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source to read replicas)
+ - May be used when creating a new instance, when restoring from S3, or when modifying an instance.
+ type: int
+ ca_certificate_identifier:
+ description:
+ - The identifier of the CA certificate for the DB instance.
+ type: str
+ character_set_name:
+ description:
+ - The character set to associate with the DB instance.
+ type: str
+ copy_tags_to_snapshot:
+ description:
+ - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating
+ a DB instance the RDS API defaults this to false if unspecified.
+ type: bool
+ db_cluster_identifier:
+ description:
+ - The DB cluster (lowercase) identifier of the cluster to add the Aurora DB instance to. The identifier must contain from 1 to
+ 63 letters, numbers, or hyphens; the first character must be a letter, and it may not end in a hyphen or
+ contain consecutive hyphens.
+ aliases:
+ - cluster_id
+ type: str
+ db_instance_class:
+ description:
+ - The compute and memory capacity of the DB instance, for example db.t2.micro.
+ aliases:
+ - class
+ - instance_type
+ type: str
+ db_instance_identifier:
+ description:
+ - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
+ hyphens; the first character must be a letter, and it may not end in a hyphen or contain consecutive hyphens.
+ aliases:
+ - instance_id
+ - id
+ required: True
+ type: str
+ db_name:
+ description:
+ - The name for your database. If a name is not provided Amazon RDS will not create a database.
+ type: str
+ db_parameter_group_name:
+ description:
+ - The name of the DB parameter group to associate with this DB instance. When creating the DB instance, if this
+ argument is omitted, the default DBParameterGroup for the specified engine is used.
+ type: str
+ db_security_groups:
+ description:
+ - (EC2-Classic platform) A list of DB security groups to associate with this DB instance.
+ type: list
+ elements: str
+ db_snapshot_identifier:
+ description:
+ - The identifier or ARN of the DB snapshot to restore from when using I(creation_source=snapshot).
+ type: str
+ aliases:
+ - snapshot_identifier
+ - snapshot_id
+ db_subnet_group_name:
+ description:
+ - The DB subnet group name to use for the DB instance.
+ aliases:
+ - subnet_group
+ type: str
+ deletion_protection:
+ description:
+ - A value that indicates whether the DB instance has deletion protection enabled.
+ The database can't be deleted when deletion protection is enabled.
+ By default, deletion protection is disabled.
+ type: bool
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ domain:
+ description:
+ - The Active Directory Domain to restore the instance in.
+ type: str
+ domain_iam_role_name:
+ description:
+ - The name of the IAM role to be used when making API calls to the Directory Service.
+ type: str
+ enable_cloudwatch_logs_exports:
+ description:
+ - A list of log types that need to be enabled for exporting to CloudWatch Logs.
+ aliases:
+ - cloudwatch_log_exports
+ type: list
+ elements: str
+ enable_iam_database_authentication:
+ description:
+ - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
+ If this option is omitted when creating the instance, Amazon RDS sets this to False.
+ type: bool
+ enable_performance_insights:
+ description:
+ - Whether to enable Performance Insights for the DB instance.
+ type: bool
+ engine:
+ description:
+ - The name of the database engine to be used for this DB instance. This is required to create an instance.
+ choices: ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb',
+ 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web']
+ type: str
+ engine_version:
+ description:
+ - The version number of the database engine to use. For example, C(5.6.10a) or C(5.7.12) for Aurora MySQL,
+ or C(9.6.3) for Aurora PostgreSQL.
+ type: str
+ final_db_snapshot_identifier:
+ description:
+ - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false.
+ aliases:
+ - final_snapshot_identifier
+ type: str
+ force_failover:
+ description:
+ - Set to true to conduct the reboot through a MultiAZ failover.
+ type: bool
+ iam_roles:
+ description:
+ - List of Amazon Web Services Identity and Access Management (IAM) roles to associate with DB instance.
+ type: list
+ elements: dict
+ suboptions:
+ feature_name:
+ description:
+ - The name of the feature associated with the IAM role.
+ type: str
+ required: true
+ role_arn:
+ description:
+ - The ARN of the IAM role to associate with the DB instance.
+ type: str
+ required: true
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ iops:
+ description:
+ - The Provisioned IOPS (I/O operations per second) value. Only applies when I(storage_type) is set to C(io1).
+ type: int
+ kms_key_id:
+ description:
+ - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the
+ same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
+ alias instead of the ARN for the KMS encryption key.
+ - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used.
+ type: str
+ license_model:
+ description:
+ - The license model for the DB instance.
+ - Valid values include C(license-included), C(bring-your-own-license), and C(general-public-license).
+ - This option can also be omitted to default to an accepted value.
+ type: str
+ master_user_password:
+ description:
+ - An 8-41 character password for the master database user. The password can contain any printable ASCII character
+ except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply_immediately) to change
+ the password immediately, otherwise it is updated during the next maintenance window.
+ aliases:
+ - password
+ type: str
+ master_username:
+ description:
+ - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter.
+ aliases:
+ - username
+ type: str
+ max_allocated_storage:
+ description:
+ - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+ type: int
+ monitoring_interval:
+ description:
+ - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting
+ metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
+ type: int
+ monitoring_role_arn:
+ description:
+ - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
+ type: str
+ multi_az:
+ description:
+ - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
+ type: bool
+ new_db_instance_identifier:
+ description:
+ - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain
+ from 1 to 63 letters, numbers, or hyphens; the first character must be a letter, and it may not end in a hyphen or
+ contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the
+ next maintenance window.
+ aliases:
+ - new_instance_id
+ - new_id
+ type: str
+ option_group_name:
+ description:
+ - The option group to associate with the DB instance.
+ type: str
+ performance_insights_kms_key_id:
+ description:
+ - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
+ type: str
+ performance_insights_retention_period:
+ description:
+ - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
+ type: int
+ port:
+ description:
+ - The port number on which the instances accept connections.
+ type: int
+ preferred_backup_window:
+ description:
+ - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+ enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+ I(preferred_maintenance_window).
+ aliases:
+ - backup_window
+ type: str
+ preferred_maintenance_window:
+ description:
+ - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+ be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+ aliases:
+ - maintenance_window
+ type: str
+ processor_features:
+ description:
+ - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
+ DB instance class of the DB instance. Names are threadsPerCore and coreCount.
+ Set this option to an empty dictionary to use the default processor features.
+ suboptions:
+ threadsPerCore:
+ description: The number of threads per core
+ coreCount:
+ description: The number of CPU cores
+ type: dict
+ promotion_tier:
+ description:
+ - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of
+ the existing primary instance.
+ type: str
+ publicly_accessible:
+ description:
+ - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with
+ a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal
+ instance with a DNS name that resolves to a private IP address.
+ type: bool
+ purge_iam_roles:
+ description:
+ - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance.
+ type: bool
+ default: False
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ restore_time:
+ description:
+ - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance.
+ For example, "2009-09-07T23:45:00Z".
+ - May alternatively set I(use_latest_restorable_time=True).
+ - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+ type: str
+ s3_bucket_name:
+ description:
+ - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance.
+ type: str
+ s3_ingestion_role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
+ the Amazon S3 bucket on your behalf.
+ type: str
+ s3_prefix:
+ description:
+ - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not
+ specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket.
+ type: str
+ skip_final_snapshot:
+ description:
+ - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier)
+ must be provided.
+ type: bool
+ default: false
+ source_db_instance_identifier:
+ description:
+ - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time
+ DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN.
+ type: str
+ source_engine:
+ description:
+ - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
+ choices:
+ - mysql
+ type: str
+ source_engine_version:
+ description:
+ - The version of the database that the backup files were created from.
+ type: str
+ source_region:
+ description:
+ - The region of the DB instance from which the replica is created.
+ type: str
+ storage_encrypted:
+ description:
+ - Whether the DB instance is encrypted.
+ type: bool
+ storage_type:
+ description:
+ - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances.
+ choices:
+ - standard
+ - gp2
+ - gp3
+ - io1
+ type: str
+ storage_throughput:
+ description:
+ - The storage throughput when the I(storage_type) is C(gp3).
+ - When the allocated storage is below 400 GB, the storage throughput will always be 125 MB/s.
+ - When the allocated storage is larger than or equal to 400 GB, the throughput starts at 500 MB/s.
+ - Requires boto3 >= 1.26.0.
+ type: int
+ version_added: 5.2.0
+ tde_credential_arn:
+ description:
+ - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is
+ supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted)
+ though it might slightly affect the performance of your database.
+ aliases:
+ - transparent_data_encryption_arn
+ type: str
+ tde_credential_password:
+ description:
+ - The password for the given ARN from the key store in order to access the device.
+ aliases:
+ - transparent_data_encryption_password
+ type: str
+ timezone:
+ description:
+ - The time zone of the DB instance.
+ type: str
+ use_latest_restorable_time:
+ description:
+ - Whether to restore the DB instance to the latest restorable backup time.
+ - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+ type: bool
+ aliases:
+ - restore_from_latest
+ vpc_security_group_ids:
+ description:
+ - A list of EC2 VPC security groups to associate with the DB instance.
+ type: list
+ elements: str
+ purge_security_groups:
+ description:
+ - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance.
+ - Can be applied to I(vpc_security_group_ids) and I(db_security_groups).
+ type: bool
+ default: True
+ version_added: 1.5.0
+ version_added_collection: community.aws
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Create a minimal Aurora instance in the default VPC and default subnet group
+ amazon.aws.rds_instance:
+ engine: aurora
+ db_instance_identifier: ansible-test-aurora-db-instance
+ instance_type: db.t2.small
+ password: "{{ password }}"
+ username: "{{ username }}"
+ cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it
+
+- name: Create a DB instance using the default AWS KMS encryption key
+ amazon.aws.rds_instance:
+ id: test-encrypted-db
+ state: present
+ engine: mariadb
+ storage_encrypted: True
+ db_instance_class: db.t2.medium
+ username: "{{ username }}"
+ password: "{{ password }}"
+ allocated_storage: "{{ allocated_storage }}"
+
+- name: Remove the DB instance without a final snapshot
+ amazon.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ skip_final_snapshot: True
+
+- name: Remove the DB instance with a final snapshot
+ amazon.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: absent
+ final_snapshot_identifier: "{{ snapshot_id }}"
+
+- name: Add a new security group without purge
+ amazon.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ vpc_security_group_ids:
+ - sg-0be17ba10c9286b0b
+ purge_security_groups: false
+ register: result
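+
+# An illustrative sketch; the instance ID is a placeholder. state=rebooted
+# starts a stopped instance before rebooting it and is not idempotent.
+- name: Reboot an instance, failing over to the standby in a Multi-AZ deployment
+ amazon.aws.rds_instance:
+ id: "{{ instance_id }}"
+ state: rebooted
+ force_failover: true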
+
+# Add IAM role to db instance
+- name: Create IAM policy
+ community.aws.iam_managed_policy:
+ policy_name: "my-policy"
+ policy: "{{ lookup('file','files/policy.json') }}"
+ state: present
+ register: iam_policy
+
+- name: Create IAM role
+ community.aws.iam_role:
+ assume_role_policy_document: "{{ lookup('file','files/assume_policy.json') }}"
+ name: "my-role"
+ state: present
+ managed_policy: "{{ iam_policy.policy.arn }}"
+ register: iam_role
+
+- name: Create DB instance with added IAM role
+ amazon.aws.rds_instance:
+ id: "my-instance-id"
+ state: present
+ engine: postgres
+ engine_version: 14.2
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: db.m6g.large
+ allocated_storage: "{{ allocated_storage }}"
+ iam_roles:
+ - role_arn: "{{ iam_role.arn }}"
+ feature_name: 's3Export'
+
+- name: Remove IAM role from DB instance
+ amazon.aws.rds_instance:
+ id: "my-instance-id"
+ state: present
+ purge_iam_roles: true
+
+# Restore DB instance from snapshot
+- name: Create a snapshot and wait until completion
+ amazon.aws.rds_instance_snapshot:
+ instance_id: 'my-instance-id'
+ snapshot_id: 'my-new-snapshot'
+ state: present
+ wait: true
+ register: snapshot
+
+- name: Restore DB from snapshot
+ amazon.aws.rds_instance:
+ id: 'my-restored-db'
+ creation_source: snapshot
+ snapshot_identifier: 'my-new-snapshot'
+ engine: mariadb
+ state: present
+ register: restored_db
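+
+# The next two tasks are illustrative sketches; identifiers and sizes are
+# hypothetical placeholders rather than recommended values.
+- name: Restore a DB instance to a point in time from a source instance
+ amazon.aws.rds_instance:
+ id: 'my-restored-db-pit'
+ creation_source: instance
+ source_db_instance_identifier: 'my-instance-id'
+ use_latest_restorable_time: true
+ state: present
+
+- name: Create a MariaDB instance on gp3 storage with explicit throughput (requires boto3 >= 1.26.0)
+ amazon.aws.rds_instance:
+ id: 'my-gp3-db'
+ state: present
+ engine: mariadb
+ db_instance_class: db.m6g.large
+ username: "{{ username }}"
+ password: "{{ password }}"
+ allocated_storage: 400
+ storage_type: gp3
+ storage_throughput: 500
+ iops: 12000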
+'''
+
+RETURN = r'''
+allocated_storage:
+ description: The allocated storage size in gigabytes. This is always 1 for Aurora database engines.
+ returned: always
+ type: int
+ sample: 20
+associated_roles:
+ description: The list of currently associated roles.
+ returned: always
+ type: list
+ sample: []
+auto_minor_version_upgrade:
+ description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
+ returned: always
+ type: bool
+ sample: true
+availability_zone:
+ description: The availability zone for the DB instance.
+ returned: always
+ type: str
+ sample: us-east-1f
+backup_retention_period:
+ description: The number of days for which automated backups are retained.
+ returned: always
+ type: int
+ sample: 1
+ca_certificate_identifier:
+ description: The identifier of the CA certificate for the DB instance.
+ returned: always
+ type: str
+ sample: rds-ca-2015
+copy_tags_to_snapshot:
+ description: Whether tags are copied from the DB instance to snapshots of the DB instance.
+ returned: always
+ type: bool
+ sample: false
+db_instance_arn:
+ description: The Amazon Resource Name (ARN) for the DB instance.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
+db_instance_class:
+ description: The name of the compute and memory capacity class of the DB instance.
+ returned: always
+ type: str
+ sample: db.m4.large
+db_instance_identifier:
+ description: The identifier of the DB instance.
+ returned: always
+ type: str
+ sample: ansible-test
+db_instance_port:
+ description: The port that the DB instance listens on.
+ returned: always
+ type: int
+ sample: 0
+db_instance_status:
+ description: The current state of this database.
+ returned: always
+ type: str
+ sample: stopped
+db_parameter_groups:
+ description: The list of DB parameter groups applied to this DB instance.
+ returned: always
+ type: complex
+ contains:
+ db_parameter_group_name:
+ description: The name of the DB parameter group.
+ returned: always
+ type: str
+ sample: default.mariadb10.0
+ parameter_apply_status:
+ description: The status of parameter updates.
+ returned: always
+ type: str
+ sample: in-sync
+db_security_groups:
+ description: A list of DB security groups associated with this DB instance.
+ returned: always
+ type: list
+ sample: []
+db_subnet_group:
+ description: The subnet group associated with the DB instance.
+ returned: always
+ type: complex
+ contains:
+ db_subnet_group_description:
+ description: The description of the DB subnet group.
+ returned: always
+ type: str
+ sample: default
+ db_subnet_group_name:
+ description: The name of the DB subnet group.
+ returned: always
+ type: str
+ sample: default
+ subnet_group_status:
+ description: The status of the DB subnet group.
+ returned: always
+ type: str
+ sample: Complete
+ subnets:
+ description: A list of Subnet elements.
+ returned: always
+ type: complex
+ contains:
+ subnet_availability_zone:
+ description: The availability zone of the subnet.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the Availability Zone.
+ returned: always
+ type: str
+ sample: us-east-1c
+ subnet_identifier:
+ description: The ID of the subnet.
+ returned: always
+ type: str
+ sample: subnet-12345678
+ subnet_status:
+ description: The status of the subnet.
+ returned: always
+ type: str
+ sample: Active
+ vpc_id:
+ description: The VpcId of the DB subnet group.
+ returned: always
+ type: str
+ sample: vpc-12345678
+dbi_resource_id:
+ description: The AWS Region-unique, immutable identifier for the DB instance.
+ returned: always
+ type: str
+ sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
+deletion_protection:
+ description: C(True) if the DB instance has deletion protection enabled, C(False) if not.
+ returned: always
+ type: bool
+ sample: False
+ version_added: 3.3.0
+ version_added_collection: community.aws
+domain_memberships:
+ description: The Active Directory Domain membership records associated with the DB instance.
+ returned: always
+ type: list
+ sample: []
+endpoint:
+ description: The connection endpoint.
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: The DNS address of the DB instance.
+ returned: always
+ type: str
+ sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
+ hosted_zone_id:
+ description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+ returned: always
+ type: str
+ sample: ZTR2ITUGPA61AM
+ port:
+ description: The port that the database engine is listening on.
+ returned: always
+ type: int
+ sample: 3306
+engine:
+ description: The database engine.
+ returned: always
+ type: str
+ sample: mariadb
+engine_version:
+ description: The database engine version.
+ returned: always
+ type: str
+ sample: 10.0.35
+iam_database_authentication_enabled:
+ description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
+ returned: always
+ type: bool
+ sample: false
+instance_create_time:
+ description: The date and time the DB instance was created.
+ returned: always
+ type: str
+ sample: '2018-07-04T16:48:35.332000+00:00'
+kms_key_id:
+ description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
+ returned: When storage_encrypted is true
+ type: str
+ sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
+latest_restorable_time:
+ description: The latest time to which a database can be restored with point-in-time restore.
+ returned: always
+ type: str
+ sample: '2018-07-04T16:50:50.642000+00:00'
+license_model:
+ description: The License model information for this DB instance.
+ returned: always
+ type: str
+ sample: general-public-license
+master_username:
+ description: The master username for the DB instance.
+ returned: always
+ type: str
+ sample: test
+max_allocated_storage:
+ description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+ returned: When max allocated storage is present.
+ type: int
+ sample: 100
+monitoring_interval:
+ description:
+ - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
+ 0 means collecting Enhanced Monitoring metrics is disabled.
+ returned: always
+ type: int
+ sample: 0
+multi_az:
+ description: Whether the DB instance is a Multi-AZ deployment.
+ returned: always
+ type: bool
+ sample: false
+option_group_memberships:
+ description: The list of option group memberships for this DB instance.
+ returned: always
+ type: complex
+ contains:
+ option_group_name:
+ description: The name of the option group that the instance belongs to.
+ returned: always
+ type: str
+ sample: default:mariadb-10-0
+ status:
+ description: The status of the DB instance's option group membership.
+ returned: always
+ type: str
+ sample: in-sync
+pending_modified_values:
+ description: The changes to the DB instance that are pending.
+ returned: always
+ type: complex
+ contains: {}
+performance_insights_enabled:
+ description: True if Performance Insights is enabled for the DB instance, false otherwise.
+ returned: always
+ type: bool
+ sample: false
+preferred_backup_window:
+ description: The daily time range during which automated backups are created if automated backups are enabled.
+ returned: always
+ type: str
+ sample: 07:01-07:31
+preferred_maintenance_window:
+ description: The weekly time range (in UTC) during which system maintenance can occur.
+ returned: always
+ type: str
+ sample: sun:09:31-sun:10:01
+publicly_accessible:
+ description:
+ - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
+ internal instance with a DNS name that resolves to a private IP address.
+ returned: always
+ type: bool
+ sample: true
+read_replica_db_instance_identifiers:
+ description: Identifiers of the Read Replicas associated with this DB instance.
+ returned: always
+ type: list
+ sample: []
+storage_encrypted:
+ description: Whether the DB instance is encrypted.
+ returned: always
+ type: bool
+ sample: false
+storage_type:
+ description: The storage type to be associated with the DB instance.
+ returned: always
+ type: str
+ sample: standard
+tags:
+ description: A dictionary of tags associated with the DB instance.
+ returned: always
+ type: complex
+ contains: {}
+vpc_security_groups:
+ description: A list of VPC security group elements that the DB instance belongs to.
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: The status of the VPC security group.
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: The ID of the VPC security group.
+ returned: always
+ type: str
+ sample: sg-12345678
+'''
+
+from time import sleep
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+from ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_identifier
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles
+
+
+valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb',
+ 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web']
+
+valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb',
+ 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web']
+
+
+def get_rds_method_attribute_name(instance, state, creation_source, read_replica):
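+ # Pick the boto3 client method that moves the instance toward the desired
+ # state: delete when absent/terminated, modify when the instance already
+ # exists, otherwise one of the create/restore variants chosen from
+ # read_replica and creation_source. Returns None when nothing needs doing.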
+ method_name = None
+ if state == 'absent' or state == 'terminated':
+ if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']:
+ method_name = 'delete_db_instance'
+ else:
+ if instance:
+ method_name = 'modify_db_instance'
+ elif read_replica is True:
+ method_name = 'create_db_instance_read_replica'
+ elif creation_source == 'snapshot':
+ method_name = 'restore_db_instance_from_db_snapshot'
+ elif creation_source == 's3':
+ method_name = 'restore_db_instance_from_s3'
+ elif creation_source == 'instance':
+ method_name = 'restore_db_instance_to_point_in_time'
+ else:
+ method_name = 'create_db_instance'
+ return method_name
+
+
+def get_instance(client, module, db_instance_id):
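+ # describe_db_instances can briefly raise DBInstanceNotFound right after an
+ # instance is created, so retry a few times before concluding it is absent.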
+ try:
+ for i in range(3):
+ try:
+ instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
+ instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
+ if instance.get('ProcessorFeatures'):
+ instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
+ (feature['Name'], feature['Value'])
+ for feature in instance['PendingModifiedValues']['ProcessorFeatures']
+ )
+ break
+ except is_boto3_error_code('DBInstanceNotFound'):
+ sleep(3)
+ else:
+ instance = {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe DB instances')
+ return instance
+
+
+def get_final_snapshot(client, module, snapshot_identifier):
+ try:
+ snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
+ if len(snapshots.get('DBSnapshots', [])) == 1:
+ return snapshots['DBSnapshots'][0]
+ return {}
+ except is_boto3_error_code('DBSnapshotNotFound'): # May not be using wait: True
+ return {}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
+
+
+def get_parameters(client, module, parameters, method_name):
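+ # Shape the module parameters for the chosen boto3 method: verify the
+ # method's required parameters are present, drop anything the method does
+ # not accept, and convert ProcessorFeatures and Tags into boto3 structures.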
+ if method_name == 'restore_db_instance_to_point_in_time':
+ parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier']
+
+ required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+ if any(parameters.get(k) is None for k in required_options):
+ module.fail_json(msg='To {0} the following parameters are required: {1}'.format(
+ get_rds_method_attribute(method_name, module).operation_description, required_options))
+ options = get_boto3_client_method_parameters(client, method_name)
+ parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+ if parameters.get('ProcessorFeatures') is not None:
+ parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()]
+
+ # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures)
+ if parameters.get('ProcessorFeatures') == [] and method_name != 'modify_db_instance':
+ parameters.pop('ProcessorFeatures')
+
+ if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']:
+ if parameters.get('Tags'):
+ parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
+
+ if method_name == 'modify_db_instance':
+ parameters = get_options_with_changing_values(client, module, parameters)
+
+ return parameters
+
+
+def get_options_with_changing_values(client, module, parameters):
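+ # Reduce the modify_db_instance call to genuine changes by comparing the
+ # desired parameters against the live instance, including any values that
+ # are already pending, and by applying the various purge_* semantics.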
+ instance_id = module.params['db_instance_identifier']
+ purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports']
+ force_update_password = module.params['force_update_password']
+ port = module.params['port']
+ apply_immediately = parameters.pop('ApplyImmediately', None)
+ cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports']
+ purge_security_groups = module.params['purge_security_groups']
+
+ if port:
+ parameters['DBPortNumber'] = port
+ if not force_update_password:
+ parameters.pop('MasterUserPassword', None)
+ if cloudwatch_logs_enabled:
+ parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled
+ if not module.params['storage_type']:
+ parameters.pop('Iops', None)
+
+ instance = get_instance(client, module, instance_id)
+ updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups)
+ updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance))
+ parameters = updated_parameters
+
+ if instance.get('StorageType') == 'io1':
+ # Bundle Iops and AllocatedStorage while updating io1 RDS Instance
+ current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops'])
+ current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage'])
+ new_iops = module.params.get('iops')
+ new_allocated_storage = module.params.get('allocated_storage')
+
+ if current_iops != new_iops or current_allocated_storage != new_allocated_storage:
+ parameters['AllocatedStorage'] = new_allocated_storage
+ parameters['Iops'] = new_iops
+
+ if instance.get('StorageType') == 'gp3':
+ if module.boto3_at_least('1.26.0'):
+ GP3_THROUGHPUT = True
+ current_storage_throughput = instance.get('PendingModifiedValues', {}).get('StorageThroughput', instance['StorageThroughput'])
+ new_storage_throughput = module.params.get('storage_throughput') or current_storage_throughput
+ if new_storage_throughput != current_storage_throughput:
+ parameters['StorageThroughput'] = new_storage_throughput
+ else:
+ GP3_THROUGHPUT = False
+ module.warn('gp3 volumes require boto3 >= 1.26.0. storage_throughput will be ignored.')
+
+ current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops'])
+ # when you just change from gp2 to gp3, you may not add the iops parameter
+ new_iops = module.params.get('iops') or current_iops
+
+ new_allocated_storage = module.params.get('allocated_storage')
+ current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage'])
+
+ if current_allocated_storage != new_allocated_storage:
+ parameters['AllocatedStorage'] = new_allocated_storage
+
+ if new_allocated_storage is not None and new_allocated_storage >= 400:
+ if new_iops < 12000:
+ module.fail_json(msg='IOPS must be at least 12000 when the allocated storage is larger than or equal to 400 GB.')
+
+ # Check GP3_THROUGHPUT first: new_storage_throughput is only defined when boto3 supports gp3 throughput
+ if GP3_THROUGHPUT and new_storage_throughput < 500:
+ module.fail_json(msg='Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB.')
+
+ if current_iops != new_iops:
+ parameters['Iops'] = new_iops
+ # must be always specified when changing iops
+ parameters['AllocatedStorage'] = new_allocated_storage
+
+ if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'):
+ if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately:
+ parameters.pop('NewDBInstanceIdentifier')
+
+ if parameters:
+ parameters['DBInstanceIdentifier'] = instance_id
+ if apply_immediately is not None:
+ parameters['ApplyImmediately'] = apply_immediately
+
+ return parameters
+
+
+def get_current_attributes_with_inconsistent_keys(instance):
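+ # describe_db_instances reports several attributes under different keys
+ # than modify_db_instance accepts; normalise those here, preferring a
+ # pending modified value over the currently applied one where both exist.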
+ options = {}
+ if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []):
+ current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable']
+ current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable']
+ options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled}
+ else:
+ options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []}
+ if instance.get('PendingModifiedValues', {}).get('Port'):
+ options['DBPortNumber'] = instance['PendingModifiedValues']['Port']
+ else:
+ options['DBPortNumber'] = instance['Endpoint']['Port']
+ if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'):
+ options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName']
+ else:
+ options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName']
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures']
+ else:
+ options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {})
+ options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']]
+ options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+ options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+ options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']]
+ options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled']
+ # PerformanceInsightsEnabled is not returned for some older RDS instances
+ options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False)
+ options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier']
+
+ # Neither of these is returned via describe_db_instances, so if either is specified during a check_mode run, changed=True
+ options['AllowMajorVersionUpgrade'] = None
+ options['MasterUserPassword'] = None
+
+ return options
+
+
+def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups):
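+ # Diff the desired values against the normalised current attributes.
+ # List-valued options only shrink when the matching purge option is set,
+ # and CloudwatchLogsExportConfiguration is expressed as enable/disable sets.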
+ changing_params = {}
+ current_options = get_current_attributes_with_inconsistent_keys(instance)
+ for option in current_options:
+ current_option = current_options[option]
+ desired_option = modify_params.pop(option, None)
+ if desired_option is None:
+ continue
+
+ # TODO: allow other purge_option module parameters rather than just checking for things to add
+ if isinstance(current_option, list):
+ if isinstance(desired_option, list):
+ if (
+ set(desired_option) < set(current_option) and
+ option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups
+ ):
+ changing_params[option] = desired_option
+ elif set(desired_option) <= set(current_option):
+ continue
+ elif isinstance(desired_option, string_types):
+ if desired_option in current_option:
+ continue
+
+ # Current option and desired option are the same - continue loop
+ if option != 'ProcessorFeatures' and current_option == desired_option:
+ continue
+
+ if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'):
+ continue
+
+ # Current option and desired option are different - add to changing_params list
+ if option == 'ProcessorFeatures' and desired_option == []:
+ changing_params['UseDefaultProcessorFeatures'] = True
+ elif option == 'CloudwatchLogsExportConfiguration':
+ current_option = set(current_option.get('LogTypesToEnable', []))
+ desired_option = set(desired_option)
+ format_option = {'EnableLogTypes': [], 'DisableLogTypes': []}
+ format_option['EnableLogTypes'] = list(desired_option.difference(current_option))
+ if purge_cloudwatch_logs:
+ format_option['DisableLogTypes'] = list(current_option.difference(desired_option))
+ if format_option['EnableLogTypes'] or format_option['DisableLogTypes']:
+ changing_params[option] = format_option
+ elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',):
+ if purge_security_groups:
+ changing_params[option] = desired_option
+ else:
+ changing_params[option] = list(set(current_option) | set(desired_option))
+ else:
+ changing_params[option] = desired_option
+
+ return changing_params
+
+
+def get_changing_options_with_consistent_keys(modify_params, instance):
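+ # For options whose keys match between describe and modify calls, compare
+ # against the pending value when one exists, otherwise the current value.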
+ changing_params = {}
+
+ for param in modify_params:
+ current_option = instance.get('PendingModifiedValues', {}).get(param, None)
+ if current_option is None:
+ current_option = instance.get(param, None)
+ if modify_params[param] != current_option:
+ changing_params[param] = modify_params[param]
+
+ return changing_params
+
+
+def validate_options(client, module, instance):
+ state = module.params['state']
+ skip_final_snapshot = module.params['skip_final_snapshot']
+ snapshot_id = module.params['final_db_snapshot_identifier']
+ modified_id = module.params['new_db_instance_identifier']
+ engine = module.params['engine']
+ tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn'])
+ read_replica = module.params['read_replica']
+ creation_source = module.params['creation_source']
+ source_instance = module.params['source_db_instance_identifier']
+ if module.params['source_region'] is not None:
+ same_region = bool(module.params['source_region'] == module.params['region'])
+ else:
+ same_region = True
+
+ if modified_id:
+ modified_instance = get_instance(client, module, modified_id)
+ else:
+ modified_instance = {}
+
+ if modified_id and instance and modified_instance:
+ module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id))
+ if modified_id and not instance and modified_instance:
+ module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id))
+ if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None:
+ module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier')
+ if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options:
+ module.fail_json(msg='TDE is available for MySQL and Oracle DB instances')
+ if read_replica is True and not instance and creation_source not in [None, 'instance']:
+ module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source))
+ if read_replica is True and not instance and not source_instance:
+ module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier')
+
+
+def update_instance(client, module, instance, instance_id):
+ changed = False
+
+ # Get newly created DB instance
+ if not instance:
+ instance = get_instance(client, module, instance_id)
+
+ # Check tagging/promoting/rebooting/starting/stopping instance
+ changed |= ensure_tags(
+ client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags']
+ )
+ changed |= promote_replication_instance(client, module, instance, module.params['read_replica'])
+ changed |= update_instance_state(client, module, instance, module.params['state'])
+
+ return changed
+
+
+def promote_replication_instance(client, module, instance, read_replica):
+ changed = False
+ if read_replica is False:
+ # 'StatusInfos' only exists when the instance is a read replica
+ # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html
+ if bool(instance.get('StatusInfos')):
+ try:
+ result, changed = call_method(client, module, method_name='promote_read_replica',
+ parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']})
+ except is_boto3_error_message('DB Instance is not a read replica'):
+ pass
+ return changed
+
+
+def ensure_iam_roles(client, module, instance_id):
+ '''
+ Ensure specified IAM roles are associated with DB instance
+
+ Parameters:
+ client: RDS client
+ module: AWSModule
+ instance_id: DB's instance ID
+
+ Returns:
+ changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not
+ '''
+ instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures'])
+
+ # Ensure engine type supports associating IAM roles
+ engine = instance.get('engine')
+ if engine not in valid_engines_iam_roles:
+ module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. Valid engines are {1}'.format(engine, valid_engines_iam_roles))
+
+ changed = False
+ purge_iam_roles = module.params.get('purge_iam_roles')
+ target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else []
+ existing_roles = instance.get('associated_roles', [])
+ roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles)
+ if bool(roles_to_add or roles_to_remove):
+ changed = True
+ # Don't update on check_mode
+ if module.check_mode:
+ module.exit_json(changed=changed, **instance)
+ else:
+ update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove)
+ return changed
+
+
+def update_instance_state(client, module, instance, state):
+ changed = False
+ if state in ['rebooted', 'restarted']:
+ changed |= reboot_running_db_instance(client, module, instance)
+ if state in ['started', 'running', 'stopped']:
+ changed |= start_or_stop_instance(client, module, instance, state)
+ return changed
+
+
+def reboot_running_db_instance(client, module, instance):
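+ # RDS cannot reboot a stopped instance, so start it first; this is why
+ # state=rebooted is documented as not idempotent.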
+ parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
+ if instance['DBInstanceStatus'] in ['stopped', 'stopping']:
+ call_method(client, module, 'start_db_instance', parameters)
+ if module.params.get('force_failover') is not None:
+ parameters['ForceFailover'] = module.params['force_failover']
+ results, changed = call_method(client, module, 'reboot_db_instance', parameters)
+ return changed
+
+
+def start_or_stop_instance(client, module, instance, state):
+ changed = False
+ parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
+ if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']:
+ if module.params['db_snapshot_identifier']:
+ parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier']
+ result, changed = call_method(client, module, 'stop_db_instance', parameters)
+ elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']:
+ result, changed = call_method(client, module, 'start_db_instance', parameters)
+ return changed
+
+
+def main():
+ arg_spec = dict(
+ state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'),
+ creation_source=dict(choices=['snapshot', 's3', 'instance']),
+ force_update_password=dict(type='bool', default=False, no_log=False),
+ purge_cloudwatch_logs_exports=dict(type='bool', default=True),
+ purge_iam_roles=dict(type='bool', default=False),
+ purge_tags=dict(type='bool', default=True),
+ read_replica=dict(type='bool'),
+ wait=dict(type='bool', default=True),
+ purge_security_groups=dict(type='bool', default=True),
+ )
+
+ parameter_options = dict(
+ allocated_storage=dict(type='int'),
+ allow_major_version_upgrade=dict(type='bool'),
+ apply_immediately=dict(type='bool', default=False),
+ auto_minor_version_upgrade=dict(type='bool'),
+ availability_zone=dict(aliases=['az', 'zone']),
+ backup_retention_period=dict(type='int'),
+ ca_certificate_identifier=dict(),
+ character_set_name=dict(),
+ copy_tags_to_snapshot=dict(type='bool'),
+ db_cluster_identifier=dict(aliases=['cluster_id']),
+ db_instance_class=dict(aliases=['class', 'instance_type']),
+ db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']),
+ db_name=dict(),
+ db_parameter_group_name=dict(),
+ db_security_groups=dict(type='list', elements='str'),
+ db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']),
+ db_subnet_group_name=dict(aliases=['subnet_group']),
+ deletion_protection=dict(type='bool'),
+ domain=dict(),
+ domain_iam_role_name=dict(),
+ enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'),
+ enable_iam_database_authentication=dict(type='bool'),
+ enable_performance_insights=dict(type='bool'),
+ engine=dict(type='str', choices=valid_engines),
+ engine_version=dict(),
+ final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
+ force_failover=dict(type='bool'),
+ iam_roles=dict(type='list', elements='dict'),
+ iops=dict(type='int'),
+ kms_key_id=dict(),
+ license_model=dict(),
+ master_user_password=dict(aliases=['password'], no_log=True),
+ master_username=dict(aliases=['username']),
+ max_allocated_storage=dict(type='int'),
+ monitoring_interval=dict(type='int'),
+ monitoring_role_arn=dict(),
+ multi_az=dict(type='bool'),
+ new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
+ option_group_name=dict(),
+ performance_insights_kms_key_id=dict(),
+ performance_insights_retention_period=dict(type='int'),
+ port=dict(type='int'),
+ preferred_backup_window=dict(aliases=['backup_window']),
+ preferred_maintenance_window=dict(aliases=['maintenance_window']),
+ processor_features=dict(type='dict'),
+ promotion_tier=dict(),
+ publicly_accessible=dict(type='bool'),
+ restore_time=dict(),
+ s3_bucket_name=dict(),
+ s3_ingestion_role_arn=dict(),
+ s3_prefix=dict(),
+ skip_final_snapshot=dict(type='bool', default=False),
+ source_db_instance_identifier=dict(),
+ source_engine=dict(choices=['mysql']),
+ source_engine_version=dict(),
+ source_region=dict(),
+ storage_encrypted=dict(type='bool'),
+ storage_type=dict(choices=['standard', 'gp2', 'gp3', 'io1']),
+ storage_throughput=dict(type='int'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
+ tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
+ timezone=dict(),
+ use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
+ vpc_security_group_ids=dict(type='list', elements='str')
+ )
+ arg_spec.update(parameter_options)
+
+ required_if = [
+ ('engine', 'aurora', ('db_cluster_identifier',)),
+ ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
+ ('engine', 'aurora-postgresql', ('db_cluster_identifier',)),
+ ('storage_type', 'io1', ('iops', 'allocated_storage')),
+ ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')),
+ ('creation_source', 's3', (
+ 's3_bucket_name', 'engine', 'master_username', 'master_user_password',
+ 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+ ]
+ mutually_exclusive = [
+ ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'),
+ ('use_latest_restorable_time', 'restore_time'),
+ ('availability_zone', 'multi_az'),
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=arg_spec,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True
+ )
+
+ # Sanitize instance identifiers
+ module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
+ if module.params['new_db_instance_identifier']:
+ module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
+
+ # Sanitize processor features
+ if module.params['processor_features'] is not None:
+ module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
+
+ # Ensure dates are in lowercase
+ if module.params['preferred_maintenance_window']:
+ module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower()
+
+ # Warn when allow_major_version_upgrade is specified in check_mode.
+ # describe_db_instances never returns this value, so in check_mode the result will always be changed=True.
+ # In non-check-mode runs changed is computed correctly, so no warning is needed there.
+ # See: amazon.aws.module_utils.rds.handle_errors.
+ if module.params.get('allow_major_version_upgrade') and module.check_mode:
+ module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.')
+
+ client = module.client('rds')
+ changed = False
+ state = module.params['state']
+ instance_id = module.params['db_instance_identifier']
+ instance = get_instance(client, module, instance_id)
+ validate_options(client, module, instance)
+ method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
+
+ if method_name:
+
+ # Exit on create/delete if check_mode
+ if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']:
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
+
+ raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+ parameters_to_modify = get_parameters(client, module, raw_parameters, method_name)
+
+ if parameters_to_modify:
+ # Exit on check_mode when parameters to modify
+ if module.check_mode:
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
+ result, changed = call_method(client, module, method_name, parameters_to_modify)
+
+ instance_id = get_final_identifier(method_name, module)
+
+ if state != 'absent':
+ # Check tagging/promoting/rebooting/starting/stopping instance
+ if not module.check_mode or instance:
+ changed |= update_instance(client, module, instance, instance_id)
+
+ # Check IAM roles
+ if module.params.get('iam_roles') or module.params.get('purge_iam_roles'):
+ changed |= ensure_iam_roles(client, module, instance_id)
+
+ if changed:
+ instance = get_instance(client, module, instance_id)
+ if state != 'absent' and (instance or not module.check_mode):
+ for attempt_to_wait in range(0, 10):
+ instance = get_instance(client, module, instance_id)
+ if instance:
+ break
+ else:
+ sleep(5)
+
+ if state == 'absent' and changed and not module.params['skip_final_snapshot']:
+ instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
+
+ pending_processor_features = None
+ if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+ pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
+ instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
+ if pending_processor_features is not None:
+ instance['pending_modified_values']['processor_features'] = pending_processor_features
+
+ module.exit_json(changed=changed, **instance)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py
new file mode 100644
index 00000000..6996b611
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018 Michael De La Rue
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_instance_info
+version_added: 5.0.0
+short_description: Obtain information about one or more RDS instances
+description:
+ - Obtain information about one or more RDS instances.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ db_instance_identifier:
+ description:
+ - The RDS instance's unique identifier.
+ required: false
+ aliases:
+ - id
+ type: str
+ filters:
+ description:
+ - A filter that specifies one or more DB instances to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
+ type: dict
+author:
+ - "Will Thames (@willthames)"
+ - "Michael De La Rue (@mikedlr)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Get information about an instance
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: new-database
+ register: new_database_info
+
+- name: Get all RDS instances
+ amazon.aws.rds_instance_info:
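+
+# An illustrative sketch: "engine" is one of the filter names accepted by the
+# DescribeDBInstances API; the value shown is a hypothetical example.
+- name: Get all RDS PostgreSQL instances using a filter
+ amazon.aws.rds_instance_info:
+ filters:
+ engine: postgres
+ register: postgres_instances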
+'''
+
+RETURN = '''
+instances:
+ description: List of RDS instances
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: Gigabytes of storage allocated to the database
+ returned: always
+ type: int
+ sample: 10
+ auto_minor_version_upgrade:
+ description: Whether minor version upgrades happen automatically
+ returned: always
+ type: bool
+ sample: true
+ availability_zone:
+ description: Availability Zone in which the database resides
+ returned: always
+ type: str
+ sample: us-west-2b
+ backup_retention_period:
+ description: Days for which backups are retained
+ returned: always
+ type: int
+ sample: 7
+ ca_certificate_identifier:
+ description: ID for the CA certificate
+ returned: always
+ type: str
+ sample: rds-ca-2015
+ copy_tags_to_snapshot:
+ description: Whether DB tags should be copied to the snapshot
+ returned: always
+ type: bool
+ sample: false
+ db_instance_arn:
+ description: ARN of the database instance
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:db:helloworld-rds
+ db_instance_class:
+ description: Instance class of the database instance
+ returned: always
+ type: str
+ sample: db.t2.small
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: helloworld-rds
+ db_instance_port:
+ description: Port used by the database instance
+ returned: always
+ type: int
+ sample: 0
+ db_instance_status:
+ description: Status of the database instance
+ returned: always
+ type: str
+ sample: available
+ db_name:
+ description: Name of the database
+ returned: always
+ type: str
+ sample: management
+ db_parameter_groups:
+ description: List of database parameter groups
+ returned: always
+ type: complex
+ contains:
+ db_parameter_group_name:
+ description: Name of the database parameter group
+ returned: always
+ type: str
+ sample: psql-pg-helloworld
+ parameter_apply_status:
+ description: Whether the parameter group has been applied
+ returned: always
+ type: str
+ sample: in-sync
+ db_security_groups:
+ description: List of security groups used by the database instance
+ returned: always
+ type: list
+ sample: []
+ db_subnet_group:
+            description: Information about the subnet group associated with the database instance
+ returned: always
+ type: complex
+ contains:
+ db_subnet_group_description:
+ description: Description of the DB subnet group
+ returned: always
+ type: str
+ sample: My database subnet group
+ db_subnet_group_name:
+ description: Name of the database subnet group
+ returned: always
+ type: str
+ sample: my-subnet-group
+ subnet_group_status:
+ description: Subnet group status
+ returned: always
+ type: str
+ sample: Complete
+ subnets:
+ description: List of subnets in the subnet group
+ returned: always
+ type: complex
+ contains:
+ subnet_availability_zone:
+ description: Availability zone of the subnet
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: Name of the availability zone
+ returned: always
+ type: str
+ sample: us-west-2c
+ subnet_identifier:
+ description: Subnet ID
+ returned: always
+ type: str
+ sample: subnet-abcd1234
+ subnet_status:
+ description: Subnet status
+ returned: always
+ type: str
+ sample: Active
+ vpc_id:
+ description: VPC id of the subnet group
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+ dbi_resource_id:
+ description: AWS Region-unique, immutable identifier for the DB instance
+ returned: always
+ type: str
+ sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
+ deletion_protection:
+ description: C(True) if the DB instance has deletion protection enabled, C(False) if not.
+ returned: always
+ type: bool
+ sample: False
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ domain_memberships:
+ description: List of domain memberships
+ returned: always
+ type: list
+ sample: []
+ endpoint:
+ description: Database endpoint
+ returned: always
+ type: complex
+ contains:
+ address:
+ description: Database endpoint address
+ returned: always
+ type: str
+ sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
+ hosted_zone_id:
+ description: Route53 hosted zone ID
+ returned: always
+ type: str
+ sample: Z1PABCD0000000
+ port:
+ description: Database endpoint port
+ returned: always
+ type: int
+ sample: 5432
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Date and time the instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ iops:
+ description: The Provisioned IOPS value for the DB instance.
+ returned: always
+ type: int
+ sample: 1000
+ kms_key_id:
+ description: KMS Key ID
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:123456789012:key/abcd1234-0000-abcd-1111-0123456789ab
+ latest_restorable_time:
+ description: Latest time to which a database can be restored with point-in-time restore
+ returned: always
+ type: str
+ sample: '2018-05-17T00:03:56+00:00'
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ monitoring_interval:
+ description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
+ returned: always
+ type: int
+ sample: 0
+ multi_az:
+ description: Whether Multi-AZ is on
+ returned: always
+ type: bool
+ sample: false
+ option_group_memberships:
+ description: List of option groups
+ returned: always
+ type: complex
+ contains:
+ option_group_name:
+ description: Option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ status:
+ description: Status of option group
+ returned: always
+ type: str
+ sample: in-sync
+ pending_modified_values:
+ description: Modified values pending application
+ returned: always
+ type: complex
+ contains: {}
+ performance_insights_enabled:
+ description: Whether performance insights are enabled
+ returned: always
+ type: bool
+ sample: false
+ preferred_backup_window:
+ description: Preferred backup window
+ returned: always
+ type: str
+ sample: 04:00-05:00
+ preferred_maintenance_window:
+ description: Preferred maintenance window
+ returned: always
+ type: str
+ sample: mon:05:00-mon:05:30
+ publicly_accessible:
+ description: Whether the DB is publicly accessible
+ returned: always
+ type: bool
+ sample: false
+ read_replica_db_instance_identifiers:
+ description: List of database instance read replicas
+ returned: always
+ type: list
+ sample: []
+ storage_encrypted:
+ description: Whether the storage is encrypted
+ returned: always
+ type: bool
+ sample: true
+ storage_type:
+ description: Storage type of the Database instance
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Tags used by the database instance
+ returned: always
+ type: complex
+ contains: {}
+ vpc_security_groups:
+ description: List of VPC security groups
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: Status of the VPC security group
+ returned: always
+ type: str
+ sample: active
+ vpc_security_group_id:
+ description: VPC Security Group ID
+ returned: always
+ type: str
+ sample: sg-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_tag_list_to_ansible_dict,
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ )
+
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+@AWSRetry.jittered_backoff()
+def _describe_db_instances(conn, **params):
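+    # Aggregate every page of DescribeDBInstances; a missing instance is
+    # returned as an empty list rather than raised as an error.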
+ paginator = conn.get_paginator('describe_db_instances')
+ try:
+ results = paginator.paginate(**params).build_full_result()['DBInstances']
+ except is_boto3_error_code('DBInstanceNotFound'):
+ results = []
+
+ return results
+
+
+def instance_info(module, conn):
+ instance_name = module.params.get('db_instance_identifier')
+ filters = module.params.get('filters')
+
+ params = dict()
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if filters:
+ params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ results = _describe_db_instances(conn, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get instance information")
+
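+    # Look up each instance's tags via ListTagsForResource and convert the
+    # boto3 tag list into an Ansible-style dict.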
+ for instance in results:
+ try:
+ instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
+
+ return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
+
+
+def main():
+ argument_spec = dict(
+ db_instance_identifier=dict(aliases=['id']),
+ filters=dict(type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ module.exit_json(**instance_info(module, conn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
new file mode 100644
index 00000000..0f779d8d
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# Copyright (c) 2014 Ansible Project
+# Copyright (c) 2017, 2018, 2019 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_instance_snapshot
+version_added: 5.0.0
+short_description: Manage Amazon RDS instance snapshots
+description:
+ - Creates or deletes RDS snapshots.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Specify the desired state of the snapshot.
+ default: present
+ choices: [ 'present', 'absent']
+ type: str
+ db_snapshot_identifier:
+ description:
+ - The snapshot to manage.
+ required: true
+ aliases:
+ - id
+ - snapshot_id
+ type: str
+ db_instance_identifier:
+ description:
+ - Database instance identifier. Required when creating a snapshot.
+ aliases:
+ - instance_id
+ type: str
+ source_db_snapshot_identifier:
+ description:
+ - The identifier of the source DB snapshot.
+ - Required when copying a snapshot.
+      - If the source snapshot is in the same AWS Region as the copy, specify the snapshot's identifier.
+      - If the source snapshot is in a different AWS Region from the copy, specify the snapshot's ARN.
+ aliases:
+ - source_id
+ - source_snapshot_id
+ type: str
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ source_region:
+ description:
+ - The region that contains the snapshot to be copied.
+ type: str
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ copy_tags:
+ description:
+      - Whether to copy all tags from I(source_db_snapshot_identifier) to I(db_snapshot_identifier).
+ type: bool
+ default: False
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ wait:
+ description:
+ - Whether or not to wait for snapshot creation or deletion.
+ type: bool
+ default: False
+ wait_timeout:
+ description:
+      - How long, in seconds, before giving up on the wait.
+ default: 300
+ type: int
+author:
+ - "Will Thames (@willthames)"
+ - "Michael De La Rue (@mikedlr)"
+ - "Alina Buzachis (@alinabuzachis)"
+ - "Joseph Torcasso (@jatorcasso)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create snapshot
+ amazon.aws.rds_instance_snapshot:
+ db_instance_identifier: new-database
+ db_snapshot_identifier: new-database-snapshot
+ register: snapshot
+
+- name: Copy snapshot from a different region and copy its tags
+ amazon.aws.rds_instance_snapshot:
+ id: new-database-snapshot-copy
+ region: us-east-1
+ source_id: "{{ snapshot.db_snapshot_arn }}"
+ source_region: us-east-2
+ copy_tags: true
+
+- name: Delete snapshot
+ amazon.aws.rds_instance_snapshot:
+ db_snapshot_identifier: new-database-snapshot
+ state: absent
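+
+# Sketch: wait for snapshot creation to finish and apply tags; the
+# parameter names follow this module's documented options.
+- name: Create snapshot, tag it and wait until it is available
+  amazon.aws.rds_instance_snapshot:
+    db_instance_identifier: new-database
+    db_snapshot_identifier: tagged-database-snapshot
+    wait: true
+    wait_timeout: 600
+    tags:
+      Environment: test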
+'''
+
+RETURN = r'''
+allocated_storage:
+ description: How much storage is allocated in GB.
+ returned: always
+ type: int
+ sample: 20
+availability_zone:
+ description: Availability zone of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: us-west-2a
+db_instance_identifier:
+ description: Database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: ansible-test-16638696
+db_snapshot_arn:
+ description: Amazon Resource Name for the snapshot.
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot
+db_snapshot_identifier:
+ description: Name of the snapshot.
+ returned: always
+ type: str
+ sample: ansible-test-16638696-test-snapshot
+dbi_resource_id:
+ description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region.
+ returned: always
+ type: str
+ sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U
+encrypted:
+ description: Whether the snapshot is encrypted.
+ returned: always
+ type: bool
+ sample: false
+engine:
+ description: Engine of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: mariadb
+engine_version:
+ description: Version of the database from which the snapshot was created.
+ returned: always
+ type: str
+ sample: 10.2.21
+iam_database_authentication_enabled:
+ description: Whether IAM database authentication is enabled.
+ returned: always
+ type: bool
+ sample: false
+instance_create_time:
+ description: Creation time of the instance from which the snapshot was created.
+ returned: always
+ type: str
+ sample: '2019-06-15T10:15:56.221000+00:00'
+license_model:
+ description: License model of the database.
+ returned: always
+ type: str
+ sample: general-public-license
+master_username:
+ description: Master username of the database.
+ returned: always
+ type: str
+ sample: test
+option_group_name:
+ description: Option group of the database.
+ returned: always
+ type: str
+ sample: default:mariadb-10-2
+percent_progress:
+ description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot.
+ returned: always
+ type: int
+ sample: 100
+port:
+ description: Port on which the database is listening.
+ returned: always
+ type: int
+ sample: 3306
+processor_features:
+ description: List of processor features of the database.
+ returned: always
+ type: list
+ sample: []
+source_db_snapshot_identifier:
+ description: The DB snapshot ARN that the DB snapshot was copied from.
+ returned: when snapshot is a copy
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot-source
+ version_added: 3.3.0
+ version_added_collection: community.aws
+snapshot_create_time:
+ description: Creation time of the snapshot.
+ returned: always
+ type: str
+ sample: '2019-06-15T10:46:23.776000+00:00'
+snapshot_type:
+  description: How the snapshot was created (always C(manual) for snapshots created by this module).
+ returned: always
+ type: str
+ sample: manual
+status:
+ description: Status of the snapshot.
+ returned: always
+ type: str
+ sample: available
+storage_type:
+ description: Storage type of the database.
+ returned: always
+ type: str
+ sample: gp2
+tags:
+ description: Tags applied to the snapshot.
+ returned: always
+ type: complex
+ contains: {}
+vpc_id:
+ description: ID of the VPC in which the DB lives.
+ returned: always
+ type: str
+ sample: vpc-09ff232e222710ae0
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # protected by AnsibleAWSModule
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+def get_snapshot(snapshot_id):
+ try:
+ snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0]
+ snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn'])
+ except is_boto3_error_code("DBSnapshotNotFound"):
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
+ return snapshot
+
+
+def get_parameters(parameters, method_name):
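+    # Map the module's parameters onto the boto3 client method: verify the
+    # method's required parameters are all present, then drop anything the
+    # method does not accept or that is unset.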
+ if method_name == 'copy_db_snapshot':
+ parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier']
+
+ required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+ if any(parameters.get(k) is None for k in required_options):
+        module.fail_json(msg='To {0} the following parameters are required: {1}'.format(
+ get_rds_method_attribute(method_name, module).operation_description, required_options))
+ options = get_boto3_client_method_parameters(client, method_name)
+ parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+ return parameters
+
+
+def ensure_snapshot_absent():
+ snapshot_name = module.params.get("db_snapshot_identifier")
+ params = {"DBSnapshotIdentifier": snapshot_name}
+ changed = False
+
+ snapshot = get_snapshot(snapshot_name)
+ if not snapshot:
+ module.exit_json(changed=changed)
+ elif snapshot and snapshot["Status"] != "deleting":
+ snapshot, changed = call_method(client, module, "delete_db_snapshot", params)
+
+ module.exit_json(changed=changed)
+
+
+def ensure_snapshot_present(params):
+ source_id = module.params.get('source_db_snapshot_identifier')
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ changed = False
+ snapshot = get_snapshot(snapshot_name)
+
+ # Copy snapshot
+ if source_id:
+ changed |= copy_snapshot(params)
+
+ # Create snapshot
+ elif not snapshot:
+ changed |= create_snapshot(params)
+
+    # Snapshot exists and we're not creating a copy - modify existing snapshot
+ else:
+ changed |= modify_snapshot()
+
+ snapshot = get_snapshot(snapshot_name)
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']))
+
+
+def create_snapshot(params):
+ method_params = get_parameters(params, 'create_db_snapshot')
+ if method_params.get('Tags'):
+ method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
+ snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params)
+
+ return changed
+
+
+def copy_snapshot(params):
+ changed = False
+ snapshot_id = module.params.get('db_snapshot_identifier')
+ snapshot = get_snapshot(snapshot_id)
+
+ if not snapshot:
+ method_params = get_parameters(params, 'copy_db_snapshot')
+ if method_params.get('Tags'):
+ method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
+ result, changed = call_method(client, module, 'copy_db_snapshot', method_params)
+
+ return changed
+
+
+def modify_snapshot():
+ # TODO - add other modifications aside from purely tags
+ changed = False
+ snapshot_id = module.params.get('db_snapshot_identifier')
+ snapshot = get_snapshot(snapshot_id)
+
+ if module.params.get('tags'):
+ changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags'])
+
+ return changed
+
+
+def main():
+ global client
+ global module
+
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True),
+ db_instance_identifier=dict(aliases=['instance_id']),
+ source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ copy_tags=dict(type='bool', default=False),
+ source_region=dict(type='str'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(retries=10)
+ try:
+ client = module.client('rds', retry_decorator=retry_decorator)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ state = module.params.get("state")
+ if state == 'absent':
+ ensure_snapshot_absent()
+
+ elif state == 'present':
+ params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec))
+ ensure_snapshot_present(params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py
new file mode 100644
index 00000000..846581b8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: rds_option_group
+short_description: Manages the creation, modification and deletion of RDS option groups
+version_added: 5.0.0
+description:
+  - Manages the creation, modification and deletion of RDS option groups.
+ - This module was originally added to C(community.aws) in release 2.1.0.
+author:
+ - "Nick Aslanidis (@naslanidis)"
+ - "Will Thames (@willthames)"
+ - "Alina Buzachis (@alinabuzachis)"
+options:
+ state:
+ description:
+ - Specifies whether the option group should be C(present) or C(absent).
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ option_group_name:
+ description:
+ - Specifies the name of the option group to be created.
+ required: true
+ type: str
+ engine_name:
+ description:
+ - Specifies the name of the engine that this option group should be associated with.
+ type: str
+ major_engine_version:
+ description:
+ - Specifies the major version of the engine that this option group should be associated with.
+ type: str
+ option_group_description:
+ description:
+ - The description of the option group.
+ type: str
+ apply_immediately:
+ description:
+ - Indicates whether the changes should be applied immediately, or during the next maintenance window.
+ required: false
+ type: bool
+ default: false
+ options:
+ description:
+ - Options in this list are added to the option group.
+ - If already present, the specified configuration is used to update the existing configuration.
+ - If none are supplied, any existing options are removed.
+ type: list
+ elements: dict
+ suboptions:
+ option_name:
+        description: The name of the option to include in the group.
+ required: false
+ type: str
+ port:
+ description: The optional port for the option.
+ required: false
+ type: int
+ option_version:
+ description: The version for the option.
+ required: false
+ type: str
+ option_settings:
+ description: The option settings to include in an option group.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The name of the option that has settings that you can set.
+ required: false
+ type: str
+ value:
+ description: The current value of the option setting.
+ required: false
+ type: str
+ default_value:
+ description: The default value of the option setting.
+ required: false
+ type: str
+ description:
+ description: The description of the option setting.
+ required: false
+ type: str
+ apply_type:
+ description: The DB engine specific parameter type.
+ required: false
+ type: str
+ data_type:
+ description: The data type of the option setting.
+ required: false
+ type: str
+ allowed_values:
+ description: The allowed values of the option setting.
+ required: false
+ type: str
+ is_modifiable:
+ description: A Boolean value that, when C(true), indicates the option setting can be modified from the default.
+ required: false
+ type: bool
+ is_collection:
+ description: Indicates if the option setting is part of a collection.
+ required: false
+ type: bool
+ db_security_group_memberships:
+ description: A list of C(DBSecurityGroupMembership) name strings used for this option.
+ required: false
+ type: list
+ elements: str
+ vpc_security_group_memberships:
+ description: A list of C(VpcSecurityGroupMembership) name strings used for this option.
+ required: false
+ type: list
+ elements: str
+ wait:
+    description: Whether to wait for the option group to be available or deleted.
+ type: bool
+ default: True
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Create an RDS MySQL option group
+- name: Create an RDS MySQL option group
+ amazon.aws.rds_option_group:
+ state: present
+ option_group_name: test-mysql-option-group
+ engine_name: mysql
+ major_engine_version: 5.6
+ option_group_description: test mysql option group
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - "sg-d188c123"
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: "20"
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: "1.25"
+ register: new_rds_mysql_option_group
+
+# Remove the currently configured options for an option group by omitting the options argument
+- name: Create an RDS MySQL option group
+ amazon.aws.rds_option_group:
+ state: present
+ option_group_name: test-mysql-option-group
+ engine_name: mysql
+ major_engine_version: 5.6
+ option_group_description: test mysql option group
+ apply_immediately: true
+ register: rds_mysql_option_group
+
+- name: Create an RDS MySQL option group using tags
+ amazon.aws.rds_option_group:
+ state: present
+ option_group_name: test-mysql-option-group
+ engine_name: mysql
+ major_engine_version: 5.6
+ option_group_description: test mysql option group
+ apply_immediately: true
+ tags:
+ Tag1: tag1
+ Tag2: tag2
+ register: rds_mysql_option_group
+
+# Delete an RDS MySQL option group
+- name: Delete an RDS MySQL option group
+ amazon.aws.rds_option_group:
+ state: absent
+ option_group_name: test-mysql-option-group
+ register: deleted_rds_mysql_option_group
+'''
+
+RETURN = r'''
+allows_vpc_and_non_vpc_instance_memberships:
+ description: Indicates whether this option group can be applied to both VPC and non-VPC instances.
+ returned: always
+ type: bool
+ sample: false
+changed:
+ description: If the Option Group has changed.
+ type: bool
+ returned: always
+ sample: true
+engine_name:
+ description: Indicates the name of the engine that this option group can be applied to.
+ returned: always
+ type: str
+ sample: "mysql"
+major_engine_version:
+ description: Indicates the major engine version associated with this option group.
+ returned: always
+ type: str
+ sample: "5.6"
+option_group_arn:
+ description: The Amazon Resource Name (ARN) for the option group.
+ returned: always
+ type: str
+ sample: "arn:aws:rds:ap-southeast-2:123456789012:og:ansible-test-option-group"
+option_group_description:
+ description: Provides a description of the option group.
+ returned: always
+ type: str
+ sample: "test mysql option group"
+option_group_name:
+ description: Specifies the name of the option group.
+ returned: always
+ type: str
+ sample: "test-mysql-option-group"
+options:
+ description: Indicates what options are available in the option group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ db_security_group_memberships:
+ description: If the option requires access to a port, then this DB security group allows access to the port.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ status:
+ description: The status of the DB security group.
+ returned: always
+ type: str
+ sample: "available"
+ db_security_group_name:
+ description: The name of the DB security group.
+ returned: always
+ type: str
+ sample: "mydbsecuritygroup"
+ option_description:
+ description: The description of the option.
+ returned: always
+ type: str
+ sample: "Innodb Memcached for MySQL"
+ option_name:
+ description: The name of the option.
+ returned: always
+ type: str
+ sample: "MEMCACHED"
+ option_settings:
+        description: The option settings for this option.
+ returned: always
+ type: list
+ contains:
+ allowed_values:
+ description: The allowed values of the option setting.
+ returned: always
+ type: str
+ sample: "1-2048"
+ apply_type:
+ description: The DB engine specific parameter type.
+ returned: always
+ type: str
+ sample: "STATIC"
+ data_type:
+ description: The data type of the option setting.
+ returned: always
+ type: str
+ sample: "INTEGER"
+ default_value:
+ description: The default value of the option setting.
+ returned: always
+ type: str
+ sample: "1024"
+ description:
+ description: The description of the option setting.
+ returned: always
+ type: str
+ sample: "Verbose level for memcached."
+ is_collection:
+ description: Indicates if the option setting is part of a collection.
+ returned: always
+ type: bool
+ sample: true
+ is_modifiable:
+ description: A Boolean value that, when true, indicates the option setting can be modified from the default.
+ returned: always
+ type: bool
+ sample: true
+ name:
+ description: The name of the option that has settings that you can set.
+ returned: always
+ type: str
+ sample: "INNODB_API_ENABLE_MDL"
+ value:
+ description: The current value of the option setting.
+ returned: always
+ type: str
+ sample: "0"
+ permanent:
+ description: Indicate if this option is permanent.
+ returned: always
+ type: bool
+ sample: true
+ persistent:
+ description: Indicate if this option is persistent.
+ returned: always
+ type: bool
+ sample: true
+ port:
+ description: If required, the port configured for this option to use.
+ returned: always
+ type: int
+ sample: 11211
+ vpc_security_group_memberships:
+ description: If the option requires access to a port, then this VPC security group allows access to the port.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ status:
+ description: The status of the VPC security group.
+ returned: always
+ type: str
+ sample: "available"
+ vpc_security_group_id:
+ description: The name of the VPC security group.
+ returned: always
+ type: str
+ sample: "sg-0cd636a23ae76e9a4"
+vpc_id:
+ description: If present, this option group can only be applied to instances that are in the VPC indicated by this field.
+ returned: always
+ type: str
+ sample: "vpc-bf07e9d6"
+tags:
+  description: The tags associated with the option group.
+ type: dict
+ returned: always
+ sample: {
+ "Ansible": "Test"
+ }
+'''
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_option_groups(client, **params):
+ try:
+ paginator = client.get_paginator('describe_option_groups')
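+        # build_full_result() aggregates every page; a missing group raises
+        # OptionGroupNotFoundFault, which is translated to {} below.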
+ return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0]
+ except is_boto3_error_code('OptionGroupNotFoundFault'):
+ return {}
+
+
+def get_option_group(client, module):
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+
+ try:
+ result = camel_dict_to_snake_dict(_describe_option_groups(client, **params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe option groups.")
+
+ if result:
+ result['tags'] = get_tags(client, module, result['option_group_arn'])
+
+ return result
+
+
+def create_option_group_options(client, module):
+ changed = True
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+ options_to_include = module.params.get('options')
+ params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True)
+
+ if module.params.get('apply_immediately'):
+ params['ApplyImmediately'] = module.params.get('apply_immediately')
+
+ if module.check_mode:
+ return changed
+
+ try:
+ client.modify_option_group(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update Option Group.")
+
+ return changed
+
+
+def remove_option_group_options(client, module, options_to_remove):
+ changed = True
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+ params['OptionsToRemove'] = options_to_remove
+
+ if module.params.get('apply_immediately'):
+ params['ApplyImmediately'] = module.params.get('apply_immediately')
+
+ if module.check_mode:
+ return changed
+
+ try:
+ client.modify_option_group(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ return changed
+
+
+def create_option_group(client, module):
+ changed = True
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+ params['EngineName'] = module.params.get('engine_name')
+ params['MajorEngineVersion'] = str(module.params.get('major_engine_version'))
+ params['OptionGroupDescription'] = module.params.get('option_group_description')
+
+ if module.params.get('tags'):
+ params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+ else:
+ params['Tags'] = list()
+
+ if module.check_mode:
+ return changed
+ try:
+ client.create_option_group(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to create Option Group.')
+
+ return changed
+
+
+def match_option_group_options(client, module):
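+    # Return True when the desired options differ from the option group's
+    # current options (setting values, ports or VPC security groups).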
+ requires_update = False
+ new_options = module.params.get('options')
+
+ # Get existing option groups and compare to our new options spec
+ current_option = get_option_group(client, module)
+
+ if current_option['options'] == [] and new_options:
+ requires_update = True
+ else:
+ for option in current_option['options']:
+ for setting_name in new_options:
+ if setting_name['option_name'] == option['option_name']:
+
+ # Security groups need to be handled separately due to different keys on request and what is
+ # returned by the API
+ if any(
+ name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and
+ setting_name[name] != option[name]
+ for name in setting_name
+ ):
+ requires_update = True
+
+ if any(
+ name in option and name == 'vpc_security_group_memberships'
+ for name in setting_name
+ ):
+ current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships'])
+ new_sg = set(setting_name['vpc_security_group_memberships'])
+ if current_sg != new_sg:
+ requires_update = True
+
+ if any(
+ new_option_setting['name'] == current_option_setting['name'] and
+ new_option_setting['value'] != current_option_setting['value']
+ for new_option_setting in setting_name['option_settings']
+ for current_option_setting in option['option_settings']
+ ):
+ requires_update = True
+ else:
+ requires_update = True
+
+ return requires_update
+
+
+def compare_option_group(client, module):
+ to_be_added = None
+ to_be_removed = None
+ current_option = get_option_group(client, module)
+ new_options = module.params.get('options')
+ new_settings = set([item['option_name'] for item in new_options])
+ old_settings = set([item['option_name'] for item in current_option['options']])
+
+ if new_settings != old_settings:
+ to_be_added = list(new_settings - old_settings)
+ to_be_removed = list(old_settings - new_settings)
+
+ return to_be_added, to_be_removed
+
+
+def setup_option_group(client, module):
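+    # Reconcile the option group with the module parameters: create it if
+    # missing, update tags, then add, update or remove options as needed.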
+ results = []
+ changed = False
+ to_be_added = None
+ to_be_removed = None
+
+ # Check if there is an existing options group
+ existing_option_group = get_option_group(client, module)
+
+ if existing_option_group:
+ results = existing_option_group
+
+ # Check tagging
+ changed |= update_tags(client, module, existing_option_group)
+
+ if module.params.get('options'):
+ # Check if existing options require updating
+ update_required = match_option_group_options(client, module)
+
+ # Check if there are options to be added or removed
+ if update_required:
+ to_be_added, to_be_removed = compare_option_group(client, module)
+
+ if to_be_added or update_required:
+ changed |= create_option_group_options(client, module)
+
+ if to_be_removed:
+ changed |= remove_option_group_options(client, module, to_be_removed)
+
+ # If changed, get updated version of option group
+ if changed:
+ results = get_option_group(client, module)
+ else:
+ # No options were supplied. If options exist, remove them
+ current_option_group = get_option_group(client, module)
+
+ if current_option_group['options'] != []:
+ # Here we would call our remove options function
+ options_to_remove = []
+
+ for option in current_option_group['options']:
+ options_to_remove.append(option['option_name'])
+
+ changed |= remove_option_group_options(client, module, options_to_remove)
+
+ # If changed, get updated version of option group
+ if changed:
+ results = get_option_group(client, module)
+ else:
+ changed = create_option_group(client, module)
+
+ if module.params.get('options'):
+ changed = create_option_group_options(client, module)
+
+ results = get_option_group(client, module)
+
+ return changed, results
+
+
+def remove_option_group(client, module):
+ changed = False
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+
+ # Check if there is an existing options group
+ existing_option_group = get_option_group(client, module)
+
+ if existing_option_group:
+
+ if module.check_mode:
+ return True, {}
+
+ changed = True
+ try:
+ client.delete_option_group(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete option group.")
+
+ return changed, {}
+
+
+def update_tags(client, module, option_group):
+ if module.params.get('tags') is None:
+ return False
+
+ try:
+ existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain option group tags.")
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
+ module.params['tags'], module.params['purge_tags'])
+ changed = bool(to_update or to_delete)
+
+ if to_update:
+ try:
+ if module.check_mode:
+ return changed
+ client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'],
+ Tags=ansible_dict_to_boto3_tag_list(to_update))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add tags to option group.")
+ if to_delete:
+ try:
+ if module.check_mode:
+ return changed
+ client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'],
+ TagKeys=to_delete)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove tags from option group.")
+
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ option_group_name=dict(required=True, type='str'),
+ engine_name=dict(type='str'),
+ major_engine_version=dict(type='str'),
+ option_group_description=dict(type='str'),
+ options=dict(required=False, type='list', elements='dict'),
+ apply_immediately=dict(type='bool', default=False),
+ state=dict(required=True, choices=['present', 'absent']),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ wait=dict(type='bool', default=True),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]],
+ )
+
+ try:
+ client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ changed, results = setup_option_group(client, module)
+ else:
+ changed, results = remove_option_group(client, module)
+
+ module.exit_json(changed=changed, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
new file mode 100644
index 00000000..46305f6f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_option_group_info
+short_description: Gather information about RDS option groups
+version_added: 5.0.0
+description:
+ - Gather information about RDS option groups.
+ - This module was originally added to C(community.aws) in release 2.1.0.
+author: "Alina Buzachis (@alinabuzachis)"
+options:
+ option_group_name:
+ description:
+ - The name of the option group to describe.
+ - Can't be supplied together with I(engine_name) or I(major_engine_version).
+ default: ''
+ required: false
+ type: str
+ marker:
+    description:
+      - If this parameter is specified, the response includes only records beyond the marker, up to the value specified by I(max_records).
+    default: ''
+    required: false
+    type: str
+  max_records:
+    description:
+      - The maximum number of records to include in the response.
+      - Allowed values are between C(20) and C(100).
+ type: int
+ default: 100
+ required: false
+ engine_name:
+ description: Filters the list of option groups to only include groups associated with a specific database engine.
+ type: str
+ default: ''
+ required: false
+ major_engine_version:
+ description:
+ - Filters the list of option groups to only include groups associated with a specific database engine version.
+ - If specified, then I(engine_name) must also be specified.
+ type: str
+ default: ''
+ required: false
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: List an option group
+ amazon.aws.rds_option_group_info:
+ option_group_name: test-mysql-option-group
+ register: option_group
+
+- name: List all the option groups
+ amazon.aws.rds_option_group_info:
+ region: ap-southeast-2
+ profile: production
+ register: option_group
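+
+# Sketch: I(engine_name) and I(major_engine_version) must be supplied
+# together and cannot be combined with I(option_group_name).
+- name: List option groups for MySQL 5.6
+  amazon.aws.rds_option_group_info:
+    engine_name: mysql
+    major_engine_version: "5.6"
+  register: mysql_option_groups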
+'''
+
+RETURN = r'''
+changed:
+  description: Whether the module made any changes; always C(false) for this info module.
+ type: bool
+ returned: always
+ sample: false
+option_groups_list:
+ description: The available RDS option groups.
+ returned: always
+ type: complex
+ contains:
+ allows_vpc_and_non_vpc_instance_memberships:
+ description: Indicates whether this option group can be applied to both VPC and non-VPC instances.
+ returned: always
+ type: bool
+ sample: false
+ engine_name:
+ description: Indicates the name of the engine that this option group can be applied to.
+ returned: always
+ type: str
+ sample: "mysql"
+ major_engine_version:
+ description: Indicates the major engine version associated with this option group.
+ returned: always
+ type: str
+ sample: "5.6"
+ option_group_arn:
+ description: The Amazon Resource Name (ARN) for the option group.
+ returned: always
+ type: str
+ sample: "arn:aws:rds:ap-southeast-2:123456789012:og:ansible-test-option-group"
+ option_group_description:
+ description: Provides a description of the option group.
+ returned: always
+ type: str
+ sample: "test mysql option group"
+ option_group_name:
+ description: Specifies the name of the option group.
+ returned: always
+ type: str
+ sample: "test-mysql-option-group"
+ options:
+ description: Indicates what options are available in the option group.
+ returned: always
+ type: complex
+ contains:
+ db_security_group_memberships:
+ description: If the option requires access to a port, then this DB security group allows access to the port.
+ returned: always
+                    type: list
+                    elements: dict
+ contains:
+ status:
+ description: The status of the DB security group.
+ returned: always
+ type: str
+ sample: "available"
+ db_security_group_name:
+ description: The name of the DB security group.
+ returned: always
+ type: str
+ sample: "mydbsecuritygroup"
+ option_description:
+ description: The description of the option.
+ returned: always
+ type: str
+ sample: "Innodb Memcached for MySQL"
+ option_name:
+ description: The name of the option.
+ returned: always
+ type: str
+ sample: "MEMCACHED"
+ option_settings:
+              description: The option settings for this option.
+ returned: always
+ type: complex
+ contains:
+ allowed_values:
+ description: The allowed values of the option setting.
+ returned: always
+ type: str
+ sample: "1-2048"
+ apply_type:
+ description: The DB engine specific parameter type.
+ returned: always
+ type: str
+ sample: "STATIC"
+ data_type:
+ description: The data type of the option setting.
+ returned: always
+ type: str
+ sample: "INTEGER"
+ default_value:
+ description: The default value of the option setting.
+ returned: always
+ type: str
+ sample: "1024"
+ description:
+ description: The description of the option setting.
+ returned: always
+ type: str
+ sample: "Verbose level for memcached."
+ is_collection:
+ description: Indicates if the option setting is part of a collection.
+ returned: always
+ type: bool
+ sample: true
+ is_modifiable:
+ description: A Boolean value that, when true, indicates the option setting can be modified from the default.
+ returned: always
+ type: bool
+ sample: true
+ name:
+ description: The name of the option that has settings that you can set.
+ returned: always
+ type: str
+ sample: "INNODB_API_ENABLE_MDL"
+ value:
+ description: The current value of the option setting.
+ returned: always
+ type: str
+ sample: "0"
+ permanent:
+ description: Indicate if this option is permanent.
+ returned: always
+ type: bool
+ sample: true
+ persistent:
+ description: Indicate if this option is persistent.
+ returned: always
+ type: bool
+ sample: true
+ port:
+ description: If required, the port configured for this option to use.
+ returned: always
+ type: int
+ sample: 11211
+ vpc_security_group_memberships:
+ description: If the option requires access to a port, then this VPC security group allows access to the port.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ status:
+ description: The status of the VPC security group.
+ returned: always
+ type: str
+ sample: "available"
+ vpc_security_group_id:
+ description: The name of the VPC security group.
+ returned: always
+ type: str
+ sample: "sg-0cd636a23ae76e9a4"
+ vpc_id:
+ description: If present, this option group can only be applied to instances that are in the VPC indicated by this field.
+ returned: always
+ type: str
+ sample: "vpc-bf07e9d6"
+ tags:
+ description: The tags associated the Internet Gateway.
+ type: dict
+ returned: always
+ sample: {
+ "Ansible": "Test"
+ }
+
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_option_groups(client, **params):
+ try:
+ paginator = client.get_paginator('describe_option_groups')
+ return paginator.paginate(**params).build_full_result()
+ except is_boto3_error_code('OptionGroupNotFoundFault'):
+ return {}
+
+
+def list_option_groups(client, module):
+ option_groups = list()
+ params = dict()
+ params['OptionGroupName'] = module.params.get('option_group_name')
+
+ if module.params.get('marker'):
+ params['Marker'] = module.params.get('marker')
+ if int(params['Marker']) < 20 or int(params['Marker']) > 100:
+ module.fail_json(msg="marker must be between 20 and 100 minutes")
+
+ if module.params.get('max_records'):
+ params['MaxRecords'] = module.params.get('max_records')
+ if params['MaxRecords'] > 100:
+ module.fail_json(msg="The maximum number of records to include in the response is 100.")
+
+ params['EngineName'] = module.params.get('engine_name')
+ params['MajorEngineVersion'] = module.params.get('major_engine_version')
+
+ try:
+ result = _describe_option_groups(client, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe option groups.")
+
+ for option_group in result['OptionGroupsList']:
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ converted_option_group = camel_dict_to_snake_dict(option_group)
+ converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn'])
+ option_groups.append(converted_option_group)
+
+ return option_groups
+
+
+def main():
+ argument_spec = dict(
+ option_group_name=dict(default='', type='str'),
+ marker=dict(type='str'),
+ max_records=dict(type='int', default=100),
+ engine_name=dict(type='str', default=''),
+ major_engine_version=dict(type='str', default=''),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['option_group_name', 'engine_name'],
+ ['option_group_name', 'major_engine_version'],
+ ],
+ required_together=[
+ ['engine_name', 'major_engine_version'],
+ ],
+ )
+
+ # Validate Requirements
+ try:
+ connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ results = list_option_groups(connection, module)
+
+ module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
new file mode 100644
index 00000000..0bb42e0a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_param_group
+version_added: 5.0.0
+short_description: Manage RDS parameter groups
+description:
+ - Creates, modifies, and deletes RDS parameter groups.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Specifies whether the group should be present or absent.
+ required: true
+ choices: [ 'present' , 'absent' ]
+ type: str
+ name:
+ description:
+ - Database parameter group identifier.
+ required: true
+ type: str
+ description:
+ description:
+ - Database parameter group description. Only set when a new group is added.
+ type: str
+ engine:
+ description:
+ - The type of database for this group.
+      - Use the following command to get a list of all supported DB engines and their respective versions.
+ - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+ - Required for I(state=present).
+ type: str
+ immediate:
+ description:
+ - Whether to apply the changes immediately, or after the next reboot of any associated instances.
+ aliases:
+ - apply_immediately
+ type: bool
+ params:
+ description:
+ - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
+ or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
+ aliases: [parameters]
+ type: dict
+author:
+ - "Scott Anderson (@tastychutney)"
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
+ amazon.aws.rds_param_group:
+ state: present
+ name: norwegian-blue
+ description: 'My Fancy Ex Parrot Group'
+ engine: 'mysql5.6'
+ params:
+ auto_increment_increment: "42K"
+ tags:
+ Environment: production
+ Application: parrot
+
+- name: Remove a parameter group
+ amazon.aws.rds_param_group:
+ state: absent
+ name: norwegian-blue
+'''
+
+RETURN = '''
+db_parameter_group_name:
+  description: Name of the DB parameter group.
+ type: str
+ returned: when state is present
+db_parameter_group_family:
+ description: DB parameter group family that this DB parameter group is compatible with.
+ type: str
+ returned: when state is present
+db_parameter_group_arn:
+ description: ARN of the DB parameter group
+ type: str
+ returned: when state is present
+description:
+  description: Description of the DB parameter group.
+ type: str
+ returned: when state is present
+errors:
+  description: List of errors from attempting to modify parameters that are not modifiable.
+ type: list
+ returned: when state is present
+tags:
+  description: Dictionary of tags.
+ type: dict
+ returned: when state is present
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+
+INT_MODIFIERS = {
+ 'K': 1024,
+ 'M': pow(1024, 2),
+ 'G': pow(1024, 3),
+ 'T': pow(1024, 4),
+}
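+# For example, a params value of "10M" for an integer parameter expands to
+# 10 * 1024 ** 2 = 10485760 before being sent to the parameter group
+# (see convert_parameter below).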
+
+
+@AWSRetry.jittered_backoff()
+def _describe_db_parameters(connection, **params):
+ try:
+ paginator = connection.get_paginator('describe_db_parameters')
+ return paginator.paginate(**params).build_full_result()
+ except is_boto3_error_code('DBParameterGroupNotFound'):
+ return None
+
+
+def convert_parameter(param, value):
+ """
+ Allows setting parameters with 10M = 10 * 1024 * 1024 and so on.
+ """
+ converted_value = value
+
+ if param['DataType'] == 'integer':
+ if isinstance(value, string_types):
+ try:
+ for modifier in INT_MODIFIERS.keys():
+ if value.endswith(modifier):
+ converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
+ except ValueError:
+ # may be based on a variable (i.e. {foo*3/4}) so
+ # just pass it on through to the AWS SDK
+ pass
+ elif isinstance(value, bool):
+ converted_value = 1 if value else 0
+
+ elif param['DataType'] == 'boolean':
+ if isinstance(value, string_types):
+ converted_value = value in BOOLEANS_TRUE
+ # convert True/False to 1/0
+ converted_value = 1 if converted_value else 0
+ return str(converted_value)
+
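+# A minimal sketch of convert_parameter() behaviour (the parameter metadata
+# comes from describe_db_parameters in practice; values here are illustrative):
+#   convert_parameter({'DataType': 'integer'}, '42K')  -> '43008'  # 42 * 1024
+#   convert_parameter({'DataType': 'boolean'}, 'yes')  -> '1'
+#   convert_parameter({'DataType': 'integer'}, True)   -> '1'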
+
+def update_parameters(module, connection):
+ groupname = module.params['name']
+ desired = module.params['params']
+ apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot'
+ errors = []
+ modify_list = []
+ existing = {}
+ try:
+ _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname)
+ if _existing:
+ existing = _existing['Parameters']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe existing parameter groups")
+ lookup = dict((param['ParameterName'], param) for param in existing)
+ for param_key, param_value in desired.items():
+ if param_key not in lookup:
+ errors.append("Parameter %s is not an available parameter for the %s engine" %
+ (param_key, module.params.get('engine')))
+ else:
+ converted_value = convert_parameter(lookup[param_key], param_value)
+ # engine-default parameters do not have a ParameterValue, so we'll always override those.
+ if converted_value != lookup[param_key].get('ParameterValue'):
+ if lookup[param_key]['IsModifiable']:
+ modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method))
+ else:
+ errors.append("Parameter %s is not modifiable" % param_key)
+
+ # modify_db_parameters takes at most 20 parameters
+ if modify_list and not module.check_mode:
+ try:
+ from itertools import izip_longest as zip_longest # python 2
+ except ImportError:
+ from itertools import zip_longest # python 3
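+ # Chunk modify_list into groups of at most 20 using the zip_longest
+ # "grouper" idiom; the None fill values are stripped out below.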
+ for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
+ non_empty_slice = [item for item in modify_slice if item]
+ try:
+ connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update parameters")
+ return True, errors
+ return False, errors
+
+
+def update_tags(module, connection, group, tags):
+ if tags is None:
+ return False
+ changed = False
+
+ existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList']
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
+ tags, module.params['purge_tags'])
+
+ if module.check_mode:
+ if not to_update and not to_delete:
+ return False
+ else:
+ return True
+
+ if to_update:
+ try:
+ connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'],
+ Tags=ansible_dict_to_boto3_tag_list(to_update))
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't add tags to parameter group")
+ if to_delete:
+ try:
+ connection.remove_tags_from_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'],
+ TagKeys=to_delete)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't remove tags from parameter group")
+ return changed
+
+
+def ensure_present(module, connection):
+ groupname = module.params['name']
+ tags = module.params.get('tags')
+ changed = False
+ errors = []
+ try:
+ response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname)
+ except is_boto3_error_code('DBParameterGroupNotFound'):
+ response = None
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access parameter group information")
+ if not response:
+ params = dict(DBParameterGroupName=groupname,
+ DBParameterGroupFamily=module.params['engine'],
+ Description=module.params['description'])
+ if tags:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+ if not module.check_mode:
+ try:
+ response = connection.create_db_parameter_group(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create parameter group")
+ else:
+ group = response['DBParameterGroups'][0]
+ if tags:
+ changed = update_tags(module, connection, group, tags)
+
+ if module.params.get('params'):
+ params_changed, errors = update_parameters(module, connection)
+ changed = changed or params_changed
+
+ try:
+ response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname)
+ group = camel_dict_to_snake_dict(response['DBParameterGroups'][0])
+ except is_boto3_error_code('DBParameterGroupNotFound'):
+ module.exit_json(changed=True, errors=errors)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain parameter group information")
+ try:
+ tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain parameter group tags")
+ group['tags'] = boto3_tag_list_to_ansible_dict(tags)
+
+ module.exit_json(changed=changed, errors=errors, **group)
+
+
+def ensure_absent(module, connection):
+ group = module.params['name']
+ try:
+ response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=group)
+ except is_boto3_error_code('DBParameterGroupNotFound'):
+ module.exit_json(changed=False)
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access parameter group information")
+
+ if response and module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ response = connection.delete_db_parameter_group(aws_retry=True, DBParameterGroupName=group)
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete parameter group")
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ engine=dict(),
+ description=dict(),
+ params=dict(aliases=['parameters'], type='dict'),
+ immediate=dict(type='bool', aliases=['apply_immediately']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ['description', 'engine']]],
+ supports_check_mode=True
+ )
+
+ try:
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ state = module.params.get('state')
+ if state == 'present':
+ ensure_present(module, conn)
+ if state == 'absent':
+ ensure_absent(module, conn)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
new file mode 100644
index 00000000..a9c69ce9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# Copyright (c) 2014-2017 Ansible Project
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_snapshot_info
+version_added: 5.0.0
+short_description: obtain information about one or more RDS snapshots
+description:
+ - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
+ - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ db_snapshot_identifier:
+ description:
+ - Name of an RDS (unclustered) snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ aliases:
+ - snapshot_name
+ type: str
+ db_instance_identifier:
+ description:
+ - RDS instance name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_identifier:
+ description:
+ - RDS cluster name for which to find snapshots.
+ - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
+ required: false
+ type: str
+ db_cluster_snapshot_identifier:
+ description:
+ - Name of an RDS cluster snapshot.
+ - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
+ required: false
+ type: str
+ snapshot_type:
+ description:
+ - Type of snapshot to find.
+ - By default both automated and manual snapshots will be returned.
+ required: false
+ choices: ['automated', 'manual', 'shared', 'public']
+ type: str
+author:
+ - "Will Thames (@willthames)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Get information about a snapshot
+ amazon.aws.rds_snapshot_info:
+ db_snapshot_identifier: snapshot_name
+ register: new_database_info
+
+- name: Get all RDS snapshots for an RDS instance
+ amazon.aws.rds_snapshot_info:
+ db_instance_identifier: helloworld-rds-master
+'''
+
+RETURN = '''
+snapshots:
+ description: List of non-clustered snapshots
+ returned: When cluster parameters are not passed
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 10
+ availability_zone:
+ description: The availability zone of the database from which the snapshot was taken
+ returned: always
+ type: str
+ sample: us-west-2b
+ db_instance_identifier:
+ description: Database instance identifier
+ returned: always
+ type: str
+ sample: hello-world-rds
+ db_snapshot_arn:
+ description: Snapshot ARN
+ returned: always
+ type: str
+ sample: arn:aws:rds:us-west-2:123456789012:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
+ db_snapshot_identifier:
+ description: Snapshot name
+ returned: always
+ type: str
+ sample: rds:hello-world-rds-us1-2018-05-16-04-03
+ encrypted:
+ description: Whether the snapshot was encrypted
+ returned: always
+ type: bool
+ sample: true
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: postgres
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 9.5.10
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ instance_create_time:
+ description: Time the Instance was created
+ returned: always
+ type: str
+ sample: '2017-10-10T04:00:07.434000+00:00'
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:us-west-2:123456789012:key/abcd1234-1234-aaaa-0000-1234567890ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: postgresql-license
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: dbadmin
+ option_group_name:
+ description: Database option group name
+ returned: always
+ type: str
+ sample: default:postgres-9-5
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 100
+ snapshot_create_time:
+ description: Time snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-16T04:03:33.871000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: automated
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: available
+ storage_type:
+ description: Storage type of underlying DB
+ returned: always
+ type: str
+ sample: gp2
+ tags:
+ description: Snapshot tags
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: ID of VPC containing the DB
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+cluster_snapshots:
+ description: List of cluster snapshots
+ returned: always
+ type: complex
+ contains:
+ allocated_storage:
+ description: How many gigabytes of storage are allocated
+ returned: always
+ type: int
+ sample: 1
+ availability_zones:
+ description: The availability zones of the database from which the snapshot was taken
+ returned: always
+ type: list
+ sample:
+ - ca-central-1a
+ - ca-central-1b
+ cluster_create_time:
+ description: Date and time the cluster was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:13:40.223000+00:00'
+ db_cluster_identifier:
+ description: Database cluster identifier
+ returned: always
+ type: str
+ sample: test-aurora-cluster
+ db_cluster_snapshot_arn:
+ description: ARN of the database snapshot
+ returned: always
+ type: str
+ sample: arn:aws:rds:ca-central-1:123456789012:cluster-snapshot:test-aurora-snapshot
+ db_cluster_snapshot_identifier:
+ description: Snapshot identifier
+ returned: always
+ type: str
+ sample: test-aurora-snapshot
+ engine:
+ description: Database engine
+ returned: always
+ type: str
+ sample: aurora
+ engine_version:
+ description: Database engine version
+ returned: always
+ type: str
+ sample: 5.6.10a
+ iam_database_authentication_enabled:
+ description: Whether database authentication through IAM is enabled
+ returned: always
+ type: bool
+ sample: false
+ kms_key_id:
+ description: ID of the KMS Key encrypting the snapshot
+ returned: always
+ type: str
+ sample: arn:aws:kms:ca-central-1:123456789012:key/abcd1234-abcd-1111-aaaa-0123456789ab
+ license_model:
+ description: License model
+ returned: always
+ type: str
+ sample: aurora
+ master_username:
+ description: Database master username
+ returned: always
+ type: str
+ sample: shertel
+ percent_progress:
+ description: Percent progress of snapshot
+ returned: always
+ type: int
+ sample: 0
+ port:
+ description: Database port
+ returned: always
+ type: int
+ sample: 0
+ snapshot_create_time:
+ description: Date and time when the snapshot was created
+ returned: always
+ type: str
+ sample: '2018-05-17T00:23:23.731000+00:00'
+ snapshot_type:
+ description: Type of snapshot
+ returned: always
+ type: str
+ sample: manual
+ status:
+ description: Status of snapshot
+ returned: always
+ type: str
+ sample: creating
+ storage_encrypted:
+ description: Whether the snapshot is encrypted
+ returned: always
+ type: bool
+ sample: true
+ tags:
+ description: Tags of the snapshot
+ returned: when snapshot is not shared
+ type: complex
+ contains: {}
+ vpc_id:
+ description: VPC of the database
+ returned: always
+ type: str
+ sample: vpc-abcd1234
+'''
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def common_snapshot_info(module, conn, method, prefix, params):
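+ # 'prefix' is the CamelCase boto3 noun (e.g. 'DBSnapshot' or
+ # 'DBClusterSnapshot'); it is used to derive the result key, the
+ # not-found error code and the ARN/identifier field names.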
+ paginator = conn.get_paginator(method)
+ try:
+ results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
+ except is_boto3_error_code('%sNotFound' % prefix):
+ results = []
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "trying to get snapshot information")
+
+ for snapshot in results:
+ try:
+ if snapshot['SnapshotType'] != 'shared':
+ snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
+ aws_retry=True)['TagList'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
+
+ return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
+
+
+def cluster_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_cluster_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_cluster_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBClusterSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBClusterIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
+
+
+def standalone_snapshot_info(module, conn):
+ snapshot_name = module.params.get('db_snapshot_identifier')
+ snapshot_type = module.params.get('snapshot_type')
+ instance_name = module.params.get('db_instance_identifier')
+
+ params = dict()
+ if snapshot_name:
+ params['DBSnapshotIdentifier'] = snapshot_name
+ if instance_name:
+ params['DBInstanceIdentifier'] = instance_name
+ if snapshot_type:
+ params['SnapshotType'] = snapshot_type
+ if snapshot_type == 'public':
+ params['IncludePublic'] = True
+ elif snapshot_type == 'shared':
+ params['IncludeShared'] = True
+
+ return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
+
+
+def main():
+ argument_spec = dict(
+ db_snapshot_identifier=dict(aliases=['snapshot_name']),
+ db_instance_identifier=dict(),
+ db_cluster_identifier=dict(),
+ db_cluster_snapshot_identifier=dict(),
+ snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
+ )
+
+ conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ results = dict()
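+ # When no identifiers are passed at all, both branches below run and both
+ # snapshot types are returned; cluster parameters skip the instance
+ # snapshots and vice versa.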
+ if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
+ results['snapshots'] = standalone_snapshot_info(module, conn)
+ if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
+ results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
+
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
new file mode 100644
index 00000000..4aae74ac
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_subnet_group
+version_added: 5.0.0
+short_description: manage RDS database subnet groups
+description:
+ - Creates, modifies, and deletes RDS database subnet groups.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Specifies whether the subnet group should be present or absent.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+ name:
+ description:
+ - Database subnet group identifier.
+ required: true
+ type: str
+ description:
+ description:
+ - Database subnet group description.
+ - Required when I(state=present).
+ type: str
+ subnets:
+ description:
+ - List of subnet IDs that make up the database subnet group.
+ - Required when I(state=present).
+ type: list
+ elements: str
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 3.2.0.
+author:
+ - "Scott Anderson (@tastychutney)"
+ - "Alina Buzachis (@alinabuzachis)"
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Add or change a subnet group
+ amazon.aws.rds_subnet_group:
+ state: present
+ name: norwegian-blue
+ description: My Fancy Ex Parrot Subnet Group
+ subnets:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+
+- name: Add or change a subnet group and associate tags
+ amazon.aws.rds_subnet_group:
+ state: present
+ name: norwegian-blue
+ description: My Fancy Ex Parrot Subnet Group
+ subnets:
+ - subnet-aaaaaaaa
+ - subnet-bbbbbbbb
+ tags:
+ tag1: Tag1
+ tag2: Tag2
+
+- name: Remove a subnet group
+ amazon.aws.rds_subnet_group:
+ state: absent
+ name: norwegian-blue
+'''
+
+RETURN = r'''
+changed:
+ description: True if the subnet group was created, updated or deleted.
+ type: bool
+ returned: always
+ sample: false
+subnet_group:
+ description: Dictionary of DB subnet group values
+ returned: I(state=present)
+ type: complex
+ contains:
+ name:
+ description: The name of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ sample: "ansible-test-mbp-13950442"
+ db_subnet_group_name:
+ description: The name of the DB subnet group
+ returned: I(state=present)
+ type: str
+ sample: "ansible-test-mbp-13950442"
+ description:
+ description: The description of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ sample: "Simple description."
+ db_subnet_group_description:
+ description: The description of the DB subnet group
+ returned: I(state=present)
+ type: str
+ sample: "Simple description."
+ vpc_id:
+ description: The VpcId of the DB subnet group
+ returned: I(state=present)
+ type: str
+ sample: "vpc-0acb0ba033ff2119c"
+ subnet_ids:
+ description: Contains a list of Subnet IDs
+ returned: I(state=present)
+ type: list
+ sample:
+ "subnet-08c94870f4480797e"
+ subnets:
+ description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa
+ returned: I(state=present)
+ type: list
+ contains:
+ subnet_availability_zone:
+ description: Contains Availability Zone information.
+ returned: I(state=present)
+ type: dict
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ sample:
+ name: "eu-north-1b"
+ subnet_identifier:
+ description: The identifier of the subnet.
+ returned: I(state=present)
+ type: str
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ sample: "subnet-08c94870f4480797e"
+ subnet_outpost:
+ description: This value specifies the Outpost.
+ returned: I(state=present)
+ type: dict
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ sample: {}
+ subnet_status:
+ description: The status of the subnet.
+ returned: I(state=present)
+ type: str
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ sample: "Active"
+ status:
+ description: The status of the DB subnet group (maintained for backward compatibility)
+ returned: I(state=present)
+ type: str
+ sample: "Complete"
+ subnet_group_status:
+ description: The status of the DB subnet group
+ returned: I(state=present)
+ type: str
+ sample: "Complete"
+ db_subnet_group_arn:
+ description: The ARN of the DB subnet group
+ returned: I(state=present)
+ type: str
+ sample: "arn:aws:rds:eu-north-1:123456789012:subgrp:ansible-test-13950442"
+ tags:
+ description: The tags associated with the subnet group
+ returned: I(state=present)
+ type: dict
+ version_added: 3.2.0
+ version_added_collection: community.aws
+ sample:
+ tag1: Tag1
+ tag2: Tag2
+'''
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_result(changed, subnet_group=None):
+ if subnet_group is None:
+ return dict(
+ changed=changed
+ )
+ result_subnet_group = dict(subnet_group)
+ result_subnet_group['name'] = result_subnet_group.get(
+ 'db_subnet_group_name')
+ result_subnet_group['description'] = result_subnet_group.get(
+ 'db_subnet_group_description')
+ result_subnet_group['status'] = result_subnet_group.get(
+ 'subnet_group_status')
+ result_subnet_group['subnet_ids'] = create_subnet_list(
+ subnet_group.get('subnets'))
+ return dict(
+ changed=changed,
+ subnet_group=result_subnet_group
+ )
+
+
+@AWSRetry.jittered_backoff()
+def _describe_db_subnet_groups_with_backoff(client, **kwargs):
+ paginator = client.get_paginator('describe_db_subnet_groups')
+ return paginator.paginate(**kwargs).build_full_result()
+
+
+def get_subnet_group(client, module):
+ params = dict()
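+ # RDS normalises subnet group names to lower case, so match that here
+ # (assumption based on observed API behaviour).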
+ params['DBSubnetGroupName'] = module.params.get('name').lower()
+
+ try:
+ _result = _describe_db_subnet_groups_with_backoff(client, **params)
+ except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't describe subnet groups.")
+
+ if _result:
+ result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0])
+ result['tags'] = get_tags(client, module, result['db_subnet_group_arn'])
+
+ return result
+
+
+def create_subnet_list(subnets):
+ r'''
+ Construct a list of subnet ids from a list of subnets dicts returned by boto3.
+ Parameters:
+ subnets (list): A list of subnets definitions.
+ @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups
+ Returns:
+ (list): List of subnet ids (str)
+ '''
+ subnets_ids = []
+ for subnet in subnets:
+ subnets_ids.append(subnet.get('subnet_identifier'))
+ return subnets_ids
+
+
+def main():
+ argument_spec = dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list', elements='str'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+ required_if = [('state', 'present', ['description', 'subnets'])]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ group_name = module.params.get('name').lower()
+ group_description = module.params.get('description')
+ group_subnets = module.params.get('subnets') or []
+
+ try:
+ connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to instantiate AWS connection.')
+
+ # Default.
+ changed = None
+ result = create_result(False)
+ tags_update = False
+ subnet_update = False
+
+ if module.params.get("tags") is not None:
+ _tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
+ else:
+ _tags = list()
+
+ matching_groups = get_subnet_group(connection, module)
+
+ if state == 'present':
+ if matching_groups:
+ # The subnet group already exists at this point.
+
+ # Check if there is any tags update
+ tags_update = ensure_tags(
+ connection,
+ module,
+ matching_groups['db_subnet_group_arn'],
+ matching_groups['tags'],
+ module.params.get("tags"),
+ module.params['purge_tags']
+ )
+
+ # Sort the subnet groups before we compare them
+ existing_subnets = create_subnet_list(matching_groups['subnets'])
+ existing_subnets.sort()
+ group_subnets.sort()
+
+ # See if anything changed.
+ if (
+ matching_groups['db_subnet_group_name'] != group_name or
+ matching_groups['db_subnet_group_description'] != group_description or
+ existing_subnets != group_subnets
+ ):
+ if not module.check_mode:
+ # Modify existing group.
+ try:
+ connection.modify_db_subnet_group(
+ aws_retry=True,
+ DBSubnetGroupName=group_name,
+ DBSubnetGroupDescription=group_description,
+ SubnetIds=group_subnets
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to update a subnet group.')
+ subnet_update = True
+ else:
+ if not module.check_mode:
+ try:
+ connection.create_db_subnet_group(
+ aws_retry=True,
+ DBSubnetGroupName=group_name,
+ DBSubnetGroupDescription=group_description,
+ SubnetIds=group_subnets,
+ Tags=_tags
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, 'Failed to create a new subnet group.')
+ subnet_update = True
+ elif state == 'absent':
+ if not module.check_mode:
+ try:
+ connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name)
+ except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+ module.exit_json(**result)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, 'Failed to delete a subnet group.')
+ else:
+ subnet_group = get_subnet_group(connection, module)
+ if subnet_group:
+ subnet_update = True
+ result = create_result(subnet_update, subnet_group)
+ module.exit_json(**result)
+
+ subnet_update = True
+
+ subnet_group = get_subnet_group(connection, module)
+ changed = tags_update or subnet_update
+ result = create_result(changed, subnet_group)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53.py b/ansible_collections/amazon/aws/plugins/modules/route53.py
new file mode 100644
index 00000000..3ac32176
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: route53
+version_added: 5.0.0
+short_description: add or delete entries in Amazon's Route 53 DNS service
+description:
+ - Creates and deletes DNS records in Amazon's Route 53 service.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Specifies the state of the resource record.
+ required: true
+ aliases: [ 'command' ]
+ choices: [ 'present', 'absent', 'get', 'create', 'delete' ]
+ type: str
+ zone:
+ description:
+ - The DNS zone to modify.
+ - This is a required parameter, if parameter I(hosted_zone_id) is not supplied.
+ type: str
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone to modify.
+ - This is a required parameter, if parameter I(zone) is not supplied.
+ type: str
+ record:
+ description:
+ - The full DNS record to create or delete.
+ required: true
+ type: str
+ ttl:
+ description:
+ - The TTL, in seconds, to give the new record.
+ - Mutually exclusive with I(alias).
+ default: 3600
+ type: int
+ type:
+ description:
+ - The type of DNS record to create.
+ required: true
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ]
+ type: str
+ alias:
+ description:
+ - Indicates if this is an alias record.
+ - Mutually exclusive with I(ttl).
+ - Defaults to C(false).
+ type: bool
+ alias_hosted_zone_id:
+ description:
+ - The hosted zone identifier.
+ type: str
+ alias_evaluate_target_health:
+ description:
+ - Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
+ type: bool
+ default: false
+ value:
+ description:
+ - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records.
+ type: list
+ elements: str
+ overwrite:
+ description:
+ - Whether an existing record should be overwritten on create if values do not match.
+ type: bool
+ retry_interval:
+ description:
+ - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds.
+ If you have many domain names, the default of C(500) seconds may be too long.
+ default: 500
+ type: int
+ private_zone:
+ description:
+ - If set to C(true), the private zone matching the requested name within the domain will be used if there are both public and private zones.
+ - The default is to use the public zone.
+ type: bool
+ default: false
+ identifier:
+ description:
+ - Must be specified for weighted, latency-based and failover resource record sets only.
+ An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+ type: str
+ weight:
+ description:
+ - Weighted resource record sets only. Among resource record sets that
+ have the same combination of DNS name and type, a value that
+ determines what portion of traffic for the current resource record set
+ is routed to the associated location.
+ - Mutually exclusive with I(region) and I(failover).
+ type: int
+ region:
+ description:
+ - Latency-based resource record sets only. Among resource record sets
+ that have the same combination of DNS name and type, a value that
+ determines which region this should be associated with for
+ latency-based routing.
+ - Mutually exclusive with I(weight) and I(failover).
+ type: str
+ geo_location:
+ description:
+ - Allows you to control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query.
+ - Two geolocation resource record sets that specify the same geographic location cannot be created.
+ - Non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation
+ resource record sets cannot be created.
+ suboptions:
+ continent_code:
+ description:
+ - The two-letter code for the continent.
+ - Specifying I(continent_code) with either I(country_code) or I(subdivision_code) returns an InvalidInput error.
+ type: str
+ country_code:
+ description:
+ - The two-letter code for a country.
+ - Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.
+ type: str
+ subdivision_code:
+ description:
+ - The two-letter code for a state of the United States.
+ - To specify I(subdivision_code), I(country_code) must be set to C(US).
+ type: str
+ type: dict
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ health_check:
+ description:
+ - Health check to associate with this record
+ type: str
+ failover:
+ description:
+ - Failover resource record sets only. Whether this is the primary or
+ secondary resource record set. Allowed values are PRIMARY and SECONDARY
+ - Mutually exclusive with I(weight) and I(region).
+ type: str
+ choices: ['SECONDARY', 'PRIMARY']
+ vpc_id:
+ description:
+ - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
+ - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
+ type: str
+ wait:
+ description:
+ - Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long to wait for the changes to be replicated, in seconds.
+ default: 300
+ type: int
+author:
+ - Bruce Pennypacker (@bpennypacker)
+ - Mike Buzzetti (@jimbydamonk)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.boto3
+'''
+
+RETURN = r'''
+nameservers:
+ description: Nameservers associated with the zone.
+ returned: when state is 'get'
+ type: list
+ sample:
+ - ns-1036.awsdns-00.org.
+ - ns-516.awsdns-00.net.
+ - ns-1504.awsdns-00.co.uk.
+ - ns-1.awsdns-00.com.
+set:
+ description: Info specific to the resource record.
+ returned: when state is 'get'
+ type: complex
+ contains:
+ alias:
+ description: Whether this is an alias.
+ returned: always
+ type: bool
+ sample: false
+ failover:
+ description: Whether this is the primary or secondary resource record set.
+ returned: always
+ type: str
+ sample: PRIMARY
+ geo_location:
+ description: Geographic location based on which Route 53 responds to DNS queries.
+ returned: when configured
+ type: dict
+ sample: { continent_code: "NA", country_code: "US", subdivision_code: "CA" }
+ version_added: 3.3.0
+ version_added_collection: community.aws
+ health_check:
+ description: health_check associated with this record.
+ returned: always
+ type: str
+ identifier:
+ description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+ returned: always
+ type: str
+ record:
+ description: Domain name for the record set.
+ returned: always
+ type: str
+ sample: new.foo.com.
+ region:
+ description: Which region this should be associated with for latency-based routing.
+ returned: always
+ type: str
+ sample: us-west-2
+ ttl:
+ description: Resource record cache TTL.
+ returned: always
+ type: str
+ sample: '3600'
+ type:
+ description: Resource record set type.
+ returned: always
+ type: str
+ sample: A
+ value:
+ description: Record value.
+ returned: always
+ type: str
+ sample: 52.43.18.27
+ values:
+ description: Record Values.
+ returned: always
+ type: list
+ sample:
+ - 52.43.18.27
+ weight:
+ description: Weight of the record.
+ returned: always
+ type: str
+ sample: '3'
+ zone:
+ description: Zone this record set belongs to.
+ returned: always
+ type: str
+ sample: foo.bar.com.
+'''
+
+EXAMPLES = r'''
+- name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ ttl: 7200
+ value: 1.1.1.1,2.2.2.2,3.3.3.3
+ wait: true
+- name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ ttl: 7200
+ value:
+ - 1.1.1.1
+ - 2.2.2.2
+ - 3.3.3.3
+ wait: true
+- name: Retrieve the details for new.foo.com
+ amazon.aws.route53:
+ state: get
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ register: rec
+- name: Delete new.foo.com A record using the results from the get command
+ amazon.aws.route53:
+ state: absent
+ zone: foo.com
+ record: "{{ rec.set.record }}"
+ ttl: "{{ rec.set.ttl }}"
+ type: "{{ rec.set.type }}"
+ value: "{{ rec.set.value }}"
+# Add an AAAA record. Note that because there are colons in the value
+# that the IPv6 address must be quoted. Also shows using the old form command=create.
+- name: Add an AAAA record
+ amazon.aws.route53:
+ command: create
+ zone: foo.com
+ record: localhost.foo.com
+ type: AAAA
+ ttl: 7200
+ value: "::1"
+# For more information on SRV records see:
+# https://en.wikipedia.org/wiki/SRV_record
+- name: Add a SRV record with multiple fields for a service on port 22222
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: "_example-service._tcp.foo.com"
+ type: SRV
+ value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
+# Note that TXT and SPF records must be surrounded
+# by quotes when sent to Route 53:
+- name: Add a TXT record.
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: localhost.foo.com
+ type: TXT
+ ttl: 7200
+ value: '"bar"'
+- name: Add an alias record that points to an Amazon ELB
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
+- name: Retrieve the details for elb.foo.com
+ amazon.aws.route53:
+ state: get
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ register: rec
+- name: Delete an alias record using the results from the get command
+ amazon.aws.route53:
+ state: absent
+ zone: foo.com
+ record: "{{ rec.set.record }}"
+ ttl: "{{ rec.set.ttl }}"
+ type: "{{ rec.set.type }}"
+ value: "{{ rec.set.value }}"
+ alias: True
+ alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
+- name: Add an alias record that points to an Amazon ELB and evaluates its health
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
+ alias_evaluate_target_health: True
+- name: Add an AAAA record with Hosted Zone ID
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ hosted_zone_id: Z2AABBCCDDEEFF
+ record: localhost.foo.com
+ type: AAAA
+ ttl: 7200
+ value: "::1"
+- name: Use a routing policy to distribute traffic
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: www.foo.com
+ type: CNAME
+ value: host1.foo.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+- name: Add a CAA record (RFC 6844)
+ amazon.aws.route53:
+ state: present
+ zone: example.com
+ record: example.com
+ type: CAA
+ value:
+ - 0 issue "ca.example.net"
+ - 0 issuewild ";"
+ - 0 iodef "mailto:security@example.com"
+- name: Create a record with geo_location - country_code
+ amazon.aws.route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'geo-test.{{ zone_one }}'
+ identifier: "geohost@www"
+ type: A
+ value: 1.1.1.1
+ ttl: 30
+ geo_location:
+ country_code: US
+- name: Create a record with geo_location - subdivision code
+ amazon.aws.route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: 'geo-test.{{ zone_one }}'
+ identifier: "geohost@www"
+ type: A
+ value: 1.1.1.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+'''
+
+from operator import itemgetter
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing
+WAIT_RETRY = 5 # how many seconds to wait between propagation status polls
+
+
+@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
+def _list_record_sets(route53, **kwargs):
+ paginator = route53.get_paginator('list_resource_record_sets')
+ return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets']
+
+
+@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
+def _list_hosted_zones(route53, **kwargs):
+ paginator = route53.get_paginator('list_hosted_zones')
+ return paginator.paginate(**kwargs).build_full_result()['HostedZones']
+
+
+def get_record(route53, zone_id, record_name, record_type, record_identifier):
+ record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id)
+
+ for record_set in record_sets_results:
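+ # Route 53 returns some characters octal-escaped (e.g. '*' as '\052');
+ # decode them so comparisons against the requested name work.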
+ record_set['Name'] = record_set['Name'].encode().decode('unicode_escape')
+ # If the record name and type do not match, move to the next record
+ if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']):
+ continue
+
+ if record_identifier and record_identifier != record_set.get("SetIdentifier"):
+ continue
+
+ return record_set
+
+ return None
+
+
+def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id):
+ """Finds a zone by name or zone_id"""
+ hosted_zones_results = _list_hosted_zones(route53)
+
+ for zone in hosted_zones_results:
+ # only save this zone id if the private status of the zone matches
+ # the private_zone_in boolean specified in the params
+ private_zone = module.boolean(zone['Config'].get('PrivateZone', False))
+ zone_id = zone['Id'].replace("/hostedzone/", "")
+
+ if private_zone == want_private and zone['Name'] == zone_name:
+ if want_vpc_id:
+ # NOTE: These details aren't available in other boto3 methods, hence the necessary
+ # extra API call
+ hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id)
+ if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]:
+ return zone_id
+ else:
+ return zone_id
+ return None
+
+
+def format_record(record_in, zone_in, zone_id):
+ """
+ Formats a record in a way that's consistent with the pre-boto3 migration values
+ as well as returning the 'normal' boto3 style values
+ """
+ if not record_in:
+ return None
+
+ record = dict(record_in)
+ record['zone'] = zone_in
+ record['hosted_zone_id'] = zone_id
+
+ record['type'] = record_in.get('Type', None)
+ record['record'] = record_in.get('Name').encode().decode('unicode_escape')
+ record['ttl'] = record_in.get('TTL', None)
+ record['identifier'] = record_in.get('SetIdentifier', None)
+ record['weight'] = record_in.get('Weight', None)
+ record['region'] = record_in.get('Region', None)
+ record['failover'] = record_in.get('Failover', None)
+ record['health_check'] = record_in.get('HealthCheckId', None)
+
+ if record['ttl']:
+ record['ttl'] = str(record['ttl'])
+ if record['weight']:
+ record['weight'] = str(record['weight'])
+ if record['region']:
+ record['region'] = str(record['region'])
+
+ if record_in.get('AliasTarget'):
+ record['alias'] = True
+ record['value'] = record_in['AliasTarget'].get('DNSName')
+ record['values'] = [record_in['AliasTarget'].get('DNSName')]
+ record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId')
+ record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth')
+ else:
+ record['alias'] = False
+ records = [r.get('Value') for r in record_in.get('ResourceRecords')]
+ record['value'] = ','.join(sorted(records))
+ record['values'] = sorted(records)
+
+ return record
+
+
+def get_hosted_zone_nameservers(route53, zone_id):
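+ # The NS record at the zone apex lists the authoritative name servers;
+ # filter the full record set down to that single entry.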
+ hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name']
+ resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id)
+
+ nameservers_records = list(
+ filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', resource_records_sets)
+ )[0]['ResourceRecords']
+
+ return [ns_record['Value'] for ns_record in nameservers_records]
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
+ zone=dict(type='str'),
+ hosted_zone_id=dict(type='str'),
+ record=dict(type='str', required=True),
+ ttl=dict(type='int', default=3600),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
+ alias=dict(type='bool'),
+ alias_hosted_zone_id=dict(type='str'),
+ alias_evaluate_target_health=dict(type='bool', default=False),
+ value=dict(type='list', elements='str'),
+ overwrite=dict(type='bool'),
+ retry_interval=dict(type='int', default=500),
+ private_zone=dict(type='bool', default=False),
+ identifier=dict(type='str'),
+ weight=dict(type='int'),
+ region=dict(type='str'),
+ geo_location=dict(type='dict',
+ options=dict(
+ continent_code=dict(type="str"),
+ country_code=dict(type="str"),
+ subdivision_code=dict(type="str")),
+ required=False),
+ health_check=dict(type='str'),
+ failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
+ vpc_id=dict(type='str'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['zone', 'hosted_zone_id']],
+ # If alias is True then you must specify alias_hosted_zone as well
+ required_together=[['alias', 'alias_hosted_zone_id']],
+ # state=present, absent, create, delete THEN value is required
+ required_if=(
+ ('state', 'present', ['value']),
+ ('state', 'create', ['value']),
+ ),
+ # failover, region and weight are mutually exclusive
+ mutually_exclusive=[
+ ('failover', 'region', 'weight'),
+ ('alias', 'ttl'),
+ ],
+ # failover, region, weight and geo_location require identifier
+ required_by=dict(
+ failover=('identifier',),
+ region=('identifier',),
+ weight=('identifier',),
+ geo_location=('identifier',),
+ ),
+ )
+
+ if module.params['state'] in ('present', 'create'):
+ command_in = 'create'
+ elif module.params['state'] in ('absent', 'delete'):
+ command_in = 'delete'
+ elif module.params['state'] == 'get':
+ command_in = 'get'
+
+ zone_in = (module.params.get('zone') or '').lower()
+ hosted_zone_id_in = module.params.get('hosted_zone_id')
+ ttl_in = module.params.get('ttl')
+ record_in = module.params.get('record').lower()
+ type_in = module.params.get('type')
+ value_in = module.params.get('value') or []
+ alias_in = module.params.get('alias')
+ alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
+ alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
+ retry_interval_in = module.params.get('retry_interval')
+
+ if module.params['vpc_id'] is not None:
+ private_zone_in = True
+ else:
+ private_zone_in = module.params.get('private_zone')
+
+ identifier_in = module.params.get('identifier')
+ weight_in = module.params.get('weight')
+ region_in = module.params.get('region')
+ health_check_in = module.params.get('health_check')
+ failover_in = module.params.get('failover')
+ vpc_id_in = module.params.get('vpc_id')
+ wait_in = module.params.get('wait')
+ wait_timeout_in = module.params.get('wait_timeout')
+ geo_location = module.params.get('geo_location')
+
+ if zone_in[-1:] != '.':
+ zone_in += "."
+
+ if record_in[-1:] != '.':
+ record_in += "."
+
+ if command_in == 'create' or command_in == 'delete':
+ if alias_in and len(value_in) != 1:
+ module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
+ if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None:
+ module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.")
+
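+ # Route 53 raises PriorRequestNotComplete while an earlier change is still
+ # propagating; retry those calls, honouring the user-supplied interval.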
+ retry_decorator = AWSRetry.jittered_backoff(
+ retries=MAX_AWS_RETRIES,
+ delay=retry_interval_in,
+ catch_extra_error_codes=['PriorRequestNotComplete'],
+ max_delay=max(60, retry_interval_in),
+ )
+
+ # connect to the route53 endpoint
+ try:
+ route53 = module.client('route53', retry_decorator=retry_decorator)
+ except botocore.exceptions.HTTPClientError as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # Find the named zone ID
+ zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in)
+
+ # Verify that the requested zone is already defined in Route53
+ if zone_id is None:
+ errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
+ module.fail_json(msg=errmsg)
+
+ aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in)
+
+ resource_record_set = scrub_none_parameters({
+ 'Name': record_in,
+ 'Type': type_in,
+ 'Weight': weight_in,
+ 'Region': region_in,
+ 'Failover': failover_in,
+ 'TTL': ttl_in,
+ 'ResourceRecords': [dict(Value=value) for value in value_in],
+ 'HealthCheckId': health_check_in,
+ 'SetIdentifier': identifier_in,
+ })
+
+ if geo_location:
+ continent_code = geo_location.get('continent_code')
+ country_code = geo_location.get('country_code')
+ subdivision_code = geo_location.get('subdivision_code')
+
+ if continent_code and (country_code or subdivision_code):
+ module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.')
+
+ if not any([continent_code, country_code, subdivision_code]):
+ module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.')
+
+ if subdivision_code and (country_code or '').lower() != 'us':
+ module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.')
+
+ # Build geo_location suboptions specification
+ resource_record_set['GeoLocation'] = {}
+ if continent_code:
+ resource_record_set['GeoLocation']['ContinentCode'] = continent_code
+ if country_code:
+ resource_record_set['GeoLocation']['CountryCode'] = country_code
+ if subdivision_code:
+ resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code
+
+ if command_in == 'delete' and aws_record is not None:
+ resource_record_set['TTL'] = aws_record.get('TTL')
+ if not resource_record_set['ResourceRecords']:
+ resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords')
+
+ if alias_in:
+ resource_record_set['AliasTarget'] = dict(
+ HostedZoneId=alias_hosted_zone_id_in,
+ DNSName=value_in[0],
+ EvaluateTargetHealth=alias_evaluate_target_health_in
+ )
+ if 'ResourceRecords' in resource_record_set:
+ del resource_record_set['ResourceRecords']
+ if 'TTL' in resource_record_set:
+ del resource_record_set['TTL']
+
+ # On CAA records order doesn't matter
+ if type_in == 'CAA':
+ resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value'))
+ if aws_record:
+ aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value'))
+
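+ # If the requested record already matches what exists in Route 53 exactly,
+ # report no change.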
+ if command_in == 'create' and aws_record == resource_record_set:
+ rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
+ module.exit_json(changed=False, resource_records_sets=rr_sets)
+
+ if command_in == 'get':
+ if type_in == 'NS':
+ ns = aws_record.get('values', []) if aws_record else []
+ else:
+ # Retrieve name servers associated to the zone.
+ ns = get_hosted_zone_nameservers(route53, zone_id)
+
+ formatted_aws = format_record(aws_record, zone_in, zone_id)
+
+ if formatted_aws is None:
+ # record does not exist
+ module.exit_json(changed=False, set=[], nameservers=ns, resource_record_sets=[])
+
+ rr_sets = [camel_dict_to_snake_dict(aws_record)]
+ module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets)
+
+ if command_in == 'delete' and not aws_record:
+ module.exit_json(changed=False)
+
+ if command_in == 'create' or command_in == 'delete':
+ if command_in == 'create' and aws_record:
+ if not module.params['overwrite']:
+ module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
+ command = 'UPSERT'
+ else:
+ command = command_in.upper()
+
+ if not module.check_mode:
+ try:
+ change_resource_record_sets = route53.change_resource_record_sets(
+ aws_retry=True,
+ HostedZoneId=zone_id,
+ ChangeBatch=dict(
+ Changes=[
+ dict(
+ Action=command,
+ ResourceRecordSet=resource_record_set
+ )
+ ]
+ )
+ )
+
+ if wait_in:
+ waiter = get_waiter(route53, 'resource_record_sets_changed')
+ waiter.wait(
+ Id=change_resource_record_sets['ChangeInfo']['Id'],
+ WaiterConfig=dict(
+ Delay=WAIT_RETRY,
+ MaxAttempts=wait_timeout_in // WAIT_RETRY,
+ )
+ )
+ except is_boto3_error_message('but it already exists'):
+ module.exit_json(changed=False)
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to update records')
+ except Exception as e:
+ module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+
+ rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
+ formatted_aws = format_record(aws_record, zone_in, zone_id)
+ formatted_record = format_record(resource_record_set, zone_in, zone_id)
+
+ module.exit_json(
+ changed=True,
+ diff=dict(
+ before=formatted_aws,
+ after=formatted_record if command_in != 'delete' else {},
+ resource_record_sets=rr_sets,
+ ),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
new file mode 100644
index 00000000..35287a79
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
@@ -0,0 +1,650 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+version_added: 5.0.0
+short_description: Manage health-checks in Amazon's Route53 DNS service
+description:
+ - Creates and deletes DNS health checks in Amazon's Route53 service.
+ - Only the I(port), I(resource_path), I(string_match) and I(request_interval)
+ options are considered when updating existing health-checks.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ state:
+ description:
+ - Specifies the action to take.
+ choices: [ 'present', 'absent' ]
+ type: str
+ default: 'present'
+ disabled:
+ description:
+ - Stops Route 53 from performing health checks.
+ - See the AWS documentation for more details on the exact implications.
+ U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html)
+ - Defaults to C(false) when creating a new health check.
+ type: bool
+ version_added: 2.1.0
+ version_added_collection: community.aws
+ ip_address:
+ description:
+ - IP address of the end-point to check. Either this or I(fqdn) has to be provided.
+ - IP addresses must be publicly routable.
+ type: str
+ port:
+ description:
+ - The port on the endpoint on which you want Amazon Route 53 to perform
+ health checks. Required for TCP checks.
+ type: int
+ type:
+ description:
+ - The type of health check that you want to create, which indicates how
+ Amazon Route 53 determines whether an endpoint is healthy.
+ - Once the health check is created, its type cannot be changed.
+ choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+ type: str
+ resource_path:
+ description:
+ - The path that you want Amazon Route 53 to request when performing
+ health checks. The path can be any value for which your endpoint will
+ return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+ for example the file /docs/route53-health-check.html.
+ - Mutually exclusive with I(type='TCP').
+ - The path must begin with a /
+ - Maximum 255 characters.
+ type: str
+ fqdn:
+ description:
+ - Domain name of the endpoint to check. Either this or I(ip_address) has
+ to be provided. When both are given the I(fqdn) is used in the C(Host:)
+ header of the HTTP request.
+ type: str
+ string_match:
+ description:
+ - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+ that you want Amazon Route 53 to search for in the response body from
+ the specified resource. If the string appears in the first 5120 bytes
+ of the response body, Amazon Route 53 considers the resource healthy.
+ type: str
+ request_interval:
+ description:
+ - The number of seconds between the time that Amazon Route 53 gets a
+ response from your endpoint and the time that it sends the next
+ health-check request.
+ default: 30
+ choices: [ 10, 30 ]
+ type: int
+ failure_threshold:
+ description:
+ - The number of consecutive health checks that an endpoint must pass or
+ fail for Amazon Route 53 to change the current status of the endpoint
+ from unhealthy to healthy or vice versa.
+ - Will default to C(3) if not specified on creation.
+ choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+ type: int
+ health_check_name:
+ description:
+ - Name of the Health Check.
+ - Used together with I(use_unique_names) to make I(health_check_name) usable as a unique identifier.
+ type: str
+ required: False
+ aliases: ['name']
+ version_added: 4.1.0
+ version_added_collection: community.aws
+ use_unique_names:
+ description:
+ - Used together with I(health_check_name) to make I(health_check_name) usable as a unique identifier.
+ type: bool
+ required: False
+ version_added: 4.1.0
+ version_added_collection: community.aws
+ health_check_id:
+ description:
+ - ID of the health check to be updated or deleted.
+ - If provided, a health check can be updated or deleted based on the ID as unique identifier.
+ type: str
+ required: False
+ aliases: ['id']
+ version_added: 4.1.0
+ version_added_collection: community.aws
+author:
+ - "zimbatm (@zimbatm)"
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create a health-check for host1.example.com and use it in record
+ amazon.aws.route53_health_check:
+ state: present
+ fqdn: host1.example.com
+ type: HTTP_STR_MATCH
+ resource_path: /
+ string_match: "Hello"
+ request_interval: 10
+ failure_threshold: 2
+ register: my_health_check
+
+- amazon.aws.route53:
+ action: create
+ zone: "example.com"
+ type: CNAME
+ record: "www.example.com"
+ value: host1.example.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "{{ my_health_check.health_check.id }}"
+
+- name: create a simple health check with health_check_name as unique identifier
+ amazon.aws.route53_health_check:
+ state: present
+ health_check_name: ansible
+ fqdn: ansible.com
+ port: 443
+ type: HTTPS
+ use_unique_names: true
+
+- name: Delete health-check
+ amazon.aws.route53_health_check:
+ state: absent
+ fqdn: host1.example.com
+
+- name: Update Health check by ID - update ip_address
+ amazon.aws.route53_health_check:
+ id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+ ip_address: 1.2.3.4
+
+- name: Update Health check by ID - update port
+ amazon.aws.route53_health_check:
+ id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+ port: 8080
+
+- name: Delete Health check by ID
+ amazon.aws.route53_health_check:
+ state: absent
+ id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+
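+# A hedged sketch with hypothetical values: TCP checks have no default port,
+# so port is required when type=TCP.
+- name: Create a TCP health-check on a custom service port
+ amazon.aws.route53_health_check:
+ state: present
+ ip_address: 192.0.2.10
+ type: TCP
+ port: 8443
+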
+'''
+
+RETURN = r'''
+health_check:
+ description: Information about the health check.
+ returned: success
+ type: dict
+ contains:
+ action:
+ description: The action performed by the module.
+ type: str
+ returned: When a change is or would be made.
+ sample: 'updated'
+ id:
+ description: The Unique ID assigned by AWS to the health check.
+ type: str
+ returned: When the health check exists.
+ sample: 50ec8a13-9623-4c66-9834-dd8c5aedc9ba
+ health_check_version:
+ description: The version number of the health check.
+ type: int
+ returned: When the health check exists.
+ sample: 14
+ health_check_config:
+ description:
+ - Detailed information about the health check.
+ - May contain additional values from Route 53 health check
+ features not yet supported by this module.
+ type: dict
+ returned: When the health check exists.
+ contains:
+ type:
+ description: The type of the health check.
+ type: str
+ returned: When the health check exists.
+ sample: 'HTTPS_STR_MATCH'
+ failure_threshold:
+ description:
+ - The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to
+ change the current status of the endpoint from unhealthy to healthy or vice versa.
+ type: int
+ returned: When the health check exists.
+ sample: 3
+ fully_qualified_domain_name:
+ description: The FQDN configured for the health check to test.
+ type: str
+ returned: When the health check exists and an FQDN is configured.
+ sample: 'host1.example.com'
+ ip_address:
+ description: The IPv4 or IPv6 IP address of the endpoint to be queried.
+ type: str
+ returned: When the health check exists and a specific IP address is configured.
+ sample: '192.0.2.44'
+ port:
+ description: The port on the endpoint that the health check will query.
+ type: int
+ returned: When the health check exists.
+ sample: 8080
+ request_interval:
+ description: The number of seconds between health check queries.
+ type: int
+ returned: When the health check exists.
+ sample: 30
+ resource_path:
+ description: The URI path to query when performing an HTTP/HTTPS based health check.
+ type: str
+ returned: When the health check exists and a resource path has been configured.
+ sample: '/healthz'
+ search_string:
+ description: A string that must be present in the response for a health check to be considered successful.
+ type: str
+ returned: When the health check exists and a search string has been configured.
+ sample: 'ALIVE'
+ disabled:
+ description: Whether the health check has been disabled or not.
+ type: bool
+ returned: When the health check exists.
+ sample: false
+ tags:
+ description: A dictionary representing the tags on the health check.
+ type: dict
+ returned: When the health check exists.
+ sample: '{"my_key": "my_value"}'
+'''
+
+import uuid
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags
+
+
+def _list_health_checks(**params):
+ try:
+ results = client.list_health_checks(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to list health checks')
+ return results
+
+
+def find_health_check(ip_addr, fqdn, hc_type, request_interval, port):
+ """Searches for health checks that have the exact same set of immutable values"""
+
+ # In lieu of an Id we perform matches against the following values:
+ # - ip_addr
+ # - fqdn
+ # - type (immutable)
+ # - request_interval
+ # - port
+
+ # Because Route 53 provides no 'filter' mechanism on the list call,
+ # using a paginator would result in (on average) double the number
+ # of API calls and can get really slow.
+ # Additionally, we can't properly wrap the paginator, so retrying means
+ # starting from scratch with a new paginator.
+ results = _list_health_checks()
+ while True:
+ for check in results.get('HealthChecks'):
+ config = check.get('HealthCheckConfig')
+ if (
+ config.get('IPAddress', None) == ip_addr and
+ config.get('FullyQualifiedDomainName', None) == fqdn and
+ config.get('Type') == hc_type and
+ config.get('RequestInterval') == request_interval and
+ config.get('Port', None) == port
+ ):
+ return check
+
+ if results.get('IsTruncated', False):
+ results = _list_health_checks(Marker=results.get('NextMarker'))
+ else:
+ return None
+
+
+def get_existing_checks_with_name():
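+ # Map the 'Name' tag to its health check for every existing check,
+ # using the same manual Marker-based pagination as find_health_check().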
+ results = _list_health_checks()
+ health_checks_with_name = {}
+ while True:
+ for check in results.get('HealthChecks'):
+ check_tags = describe_health_check(check['Id'])['tags']
+ if 'Name' in check_tags:
+ health_checks_with_name[check_tags['Name']] = check
+ if results.get('IsTruncated', False):
+ results = _list_health_checks(Marker=results.get('NextMarker'))
+ else:
+ return health_checks_with_name
+
+
+def delete_health_check(check_id):
+ if not check_id:
+ return False, None
+
+ if module.check_mode:
+ return True, 'delete'
+
+ try:
+ client.delete_health_check(
+ aws_retry=True,
+ HealthCheckId=check_id,
+ )
+ except is_boto3_error_code('NoSuchHealthCheck'):
+ # Handle the deletion race condition as cleanly as possible
+ return False, None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to delete health check')
+
+ return True, 'delete'
+
+
+def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in):
+
+ # In general, if a request is repeated with the same CallerRef it won't
+ # result in a duplicate check appearing. This means we can safely use our
+ # retry decorators
+ caller_ref = str(uuid.uuid4())
+ missing_args = []
+
+ health_check = dict(
+ Type=type_in,
+ RequestInterval=request_interval_in,
+ Port=port_in,
+ )
+ if module.params.get('disabled') is not None:
+ health_check['Disabled'] = module.params.get('disabled')
+ if ip_addr_in:
+ health_check['IPAddress'] = ip_addr_in
+ if fqdn_in:
+ health_check['FullyQualifiedDomainName'] = fqdn_in
+
+ if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+ resource_path = module.params.get('resource_path')
+ # resource_path is optional for HTTP(S) checks, so it is only sent to
+ # the API when provided rather than treated as a missing argument.
+ if resource_path:
+ health_check['ResourcePath'] = resource_path
+ if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+ string_match = module.params.get('string_match')
+ if not string_match:
+ missing_args.append('string_match')
+ health_check['SearchString'] = module.params.get('string_match')
+
+ failure_threshold = module.params.get('failure_threshold')
+ if not failure_threshold:
+ failure_threshold = 3
+ health_check['FailureThreshold'] = failure_threshold
+
+ if missing_args:
+ module.fail_json(msg='missing required arguments for creation: {0}'.format(
+ ', '.join(missing_args)),
+ )
+
+ if module.check_mode:
+ return True, 'create', None
+
+ try:
+ result = client.create_health_check(
+ aws_retry=True,
+ CallerReference=caller_ref,
+ HealthCheckConfig=health_check,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check)
+
+ check_id = result.get('HealthCheck').get('Id')
+ return True, 'create', check_id
+
+
+def update_health_check(existing_check):
+ # It's possible to update following parameters
+ # - ResourcePath
+ # - SearchString
+ # - FailureThreshold
+ # - Disabled
+ # - IPAddress
+ # - Port
+ # - FullyQualifiedDomainName
+
+ changes = dict()
+ existing_config = existing_check.get('HealthCheckConfig')
+
+ resource_path = module.params.get('resource_path', None)
+ if resource_path and resource_path != existing_config.get('ResourcePath'):
+ changes['ResourcePath'] = resource_path
+
+ search_string = module.params.get('string_match', None)
+ if search_string and search_string != existing_config.get('SearchString'):
+ changes['SearchString'] = search_string
+
+ failure_threshold = module.params.get('failure_threshold', None)
+ if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'):
+ changes['FailureThreshold'] = failure_threshold
+
+ disabled = module.params.get('disabled', None)
+ if disabled is not None and disabled != existing_config.get('Disabled'):
+ changes['Disabled'] = module.params.get('disabled')
+
+ # If updating based on Health Check ID or health_check_name, we can update
+ if module.params.get('health_check_id') or module.params.get('use_unique_names'):
+ ip_address = module.params.get('ip_address', None)
+ if ip_address is not None and ip_address != existing_config.get('IPAddress'):
+ changes['IPAddress'] = module.params.get('ip_address')
+
+ port = module.params.get('port', None)
+ if port is not None and port != existing_config.get('Port'):
+ changes['Port'] = module.params.get('port')
+
+ fqdn = module.params.get('fqdn', None)
+ if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'):
+ changes['FullyQualifiedDomainName'] = module.params.get('fqdn')
+
+ # No changes...
+ if not changes:
+ return False, None
+ if module.check_mode:
+ return True, 'update'
+
+ check_id = existing_check.get('Id')
+ # This makes sure we're starting from the version we think we are...
+ version_id = existing_check.get('HealthCheckVersion', 1)
+ try:
+ client.update_health_check(
+ HealthCheckId=check_id,
+ HealthCheckVersion=version_id,
+ **changes,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to update health check.', id=check_id)
+
+ return True, 'update'
+
+
+def describe_health_check(id):
+ if not id:
+ return dict()
+
+ try:
+ result = client.get_health_check(
+ aws_retry=True,
+ HealthCheckId=id,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to get health check.', id=id)
+
+ health_check = result.get('HealthCheck', {})
+ health_check = camel_dict_to_snake_dict(health_check)
+ tags = get_tags(module, client, 'healthcheck', id)
+ health_check['tags'] = tags
+ return health_check
+
+
+def main():
+ argument_spec = dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ disabled=dict(type='bool'),
+ ip_address=dict(),
+ port=dict(type='int'),
+ type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+ resource_path=dict(),
+ fqdn=dict(),
+ string_match=dict(),
+ request_interval=dict(type='int', choices=[10, 30], default=30),
+ failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ health_check_id=dict(type='str', aliases=['id'], required=False),
+ health_check_name=dict(type='str', aliases=['name'], required=False),
+ use_unique_names=dict(type='bool', required=False),
+ )
+
+ args_one_of = [
+ ['ip_address', 'fqdn', 'health_check_id'],
+ ]
+
+ args_if = [
+ ['type', 'TCP', ('port',)],
+ ]
+
+ args_required_together = [
+ ['use_unique_names', 'health_check_name'],
+ ]
+
+ args_mutually_exclusive = [
+ ['health_check_id', 'health_check_name']
+ ]
+
+ global module
+ global client
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=args_one_of,
+ required_if=args_if,
+ required_together=args_required_together,
+ mutually_exclusive=args_mutually_exclusive,
+ supports_check_mode=True,
+ )
+
+ if not module.params.get('health_check_id') and not module.params.get('type'):
+ module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.")
+
+ state_in = module.params.get('state')
+ ip_addr_in = module.params.get('ip_address')
+ port_in = module.params.get('port')
+ type_in = module.params.get('type')
+ resource_path_in = module.params.get('resource_path')
+ fqdn_in = module.params.get('fqdn')
+ string_match_in = module.params.get('string_match')
+ request_interval_in = module.params.get('request_interval')
+ failure_threshold_in = module.params.get('failure_threshold')
+ health_check_name = module.params.get('health_check_name')
+ tags = module.params.get('tags')
+
+ # Default port
+ if port_in is None:
+ if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+ port_in = 80
+ elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
+ port_in = 443
+
+ if string_match_in:
+ if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+ module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
+ if len(string_match_in) > 255:
+ module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
+
+ client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff())
+
+ changed = False
+ action = None
+ check_id = None
+
+ if module.params.get('use_unique_names') or module.params.get('health_check_id'):
+ module.deprecate(
+ 'health_check_name is currently a non-required parameter.'
+ ' This behavior will change: health_check_name will become'
+ ' required=True and use_unique_names will default to True in release 6.0.0.',
+ version='6.0.0', collection_name='amazon.aws')
+
+ # If update or delete Health Check based on ID
+ update_delete_by_id = False
+ if module.params.get('health_check_id'):
+ update_delete_by_id = True
+ id_to_update_delete = module.params.get('health_check_id')
+ try:
+ existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete))
+ else:
+ existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in)
+ if existing_check:
+ check_id = existing_check.get('Id')
+
+ # Delete Health Check
+ if state_in == 'absent':
+ if update_delete_by_id:
+ changed, action = delete_health_check(id_to_update_delete)
+ else:
+ changed, action = delete_health_check(check_id)
+ check_id = None
+
+ # Create Health Check
+ elif state_in == 'present':
+ if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id:
+ changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in)
+
+ # Update Health Check
+ else:
+ # If health_check_name is a unique identifier
+ if module.params.get('use_unique_names'):
+ existing_checks_with_name = get_existing_checks_with_name()
+ # update the health_check if another health check with same name exists
+ if health_check_name in existing_checks_with_name:
+ changed, action = update_health_check(existing_checks_with_name[health_check_name])
+ else:
+ # create a new health_check if another health check with same name does not exists
+ changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in)
+ # Add tag to add name to health check
+ if check_id:
+ if not tags:
+ tags = {}
+ tags['Name'] = health_check_name
+
+ else:
+ # Whether matched by ID or by its immutable values, existing_check
+ # already points at the health check to update.
+ changed, action = update_health_check(existing_check)
+
+ if check_id:
+ changed |= manage_tags(module, client, 'healthcheck', check_id,
+ tags, module.params.get('purge_tags'))
+
+ health_check = describe_health_check(id=check_id)
+ health_check['action'] = action
+ module.exit_json(
+ changed=changed,
+ health_check=health_check,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_info.py b/ansible_collections/amazon/aws/plugins/modules/route53_info.py
new file mode 100644
index 00000000..a2a83c6c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_info.py
@@ -0,0 +1,836 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: route53_info
+short_description: Retrieves Route53 details using AWS methods
+version_added: 5.0.0
+description:
+ - Gets various details related to Route53 zone, record set or health check details.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ query:
+ description:
+ - Specifies the query action to take.
+ required: True
+ choices: [
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ]
+ type: str
+ change_id:
+ description:
+ - The ID of the change batch request.
+ - The value that you specify here is the value that
+ ChangeResourceRecordSets returned in the Id element
+ when you submitted the request.
+ - Required if I(query=change).
+ required: false
+ type: str
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone.
+ - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details).
+ - Required if I(query) is set to I(record_sets).
+ required: false
+ type: str
+ max_items:
+ description:
+ - Maximum number of items to return for various get/list requests.
+ required: false
+ type: int
+ next_marker:
+ description:
+ - "Some requests such as list_command: hosted_zones will return a maximum
+ number of entries - EG 100 or the number specified by I(max_items).
+ If the number of entries exceeds this maximum another request can be sent
+ using the NextMarker entry from the first response to get the next page
+ of results."
+ required: false
+ type: str
+ delegation_set_id:
+ description:
+ - The DNS Zone delegation set ID.
+ required: false
+ type: str
+ start_record_name:
+ description:
+ - "The first name in the lexicographic ordering of domain names that you want
+ the list_command: record_sets to start listing from."
+ required: false
+ type: str
+ type:
+ description:
+ - The type of DNS record.
+ required: false
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' ]
+ type: str
+ dns_name:
+ description:
+ - The first name in the lexicographic ordering of domain names that you want
+ the list operation to start listing from.
+ required: false
+ type: str
+ resource_id:
+ description:
+ - The ID(s) of the specified resource(s).
+ - Required if I(query=health_check) and I(health_check_method=tags).
+ - Required if I(query=hosted_zone) and I(hosted_zone_method=tags).
+ required: false
+ aliases: ['resource_ids']
+ type: list
+ elements: str
+ health_check_id:
+ description:
+ - The ID of the health check.
+ - Required if C(query) is set to C(health_check) and
+ C(health_check_method) is set to C(details) or C(status) or C(failure_reason).
+ required: false
+ type: str
+ hosted_zone_method:
+ description:
+ - "This is used in conjunction with query: hosted_zone.
+ It allows for listing details, counts or tags of various
+ hosted zone details."
+ required: false
+ choices: [
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+ health_check_method:
+ description:
+ - "This is used in conjunction with query: health_check.
+ It allows for listing details, counts or tags of various
+ health check details."
+ required: false
+ choices: [
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ type: str
+author:
+ - Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all hosted zones
+- name: List all hosted zones
+ amazon.aws.route53_info:
+ query: hosted_zone
+ register: hosted_zones
+
+# Getting a count of hosted zones
+- name: Return a count of all hosted zones
+ amazon.aws.route53_info:
+ query: hosted_zone
+ hosted_zone_method: count
+ register: hosted_zone_count
+
+- name: List the first 20 resource record sets in a given hosted zone
+ amazon.aws.route53_info:
+ profile: account_name
+ query: record_sets
+ hosted_zone_id: ZZZ1111112222
+ max_items: 20
+ register: record_sets
+
+- name: List first 20 health checks
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_method: list
+ max_items: 20
+ register: health_checks
+
+- name: Get health check last failure_reason
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_method: failure_reason
+ health_check_id: 00000000-1111-2222-3333-12345678abcd
+ register: health_check_failure_reason
+
+- name: Retrieve reusable delegation set details
+ amazon.aws.route53_info:
+ query: reusable_delegation_set
+ delegation_set_id: delegation id
+ register: delegation_sets
+
+- name: setup of example for using next_marker
+ amazon.aws.route53_info:
+ query: hosted_zone
+ max_items: 1
+ register: first_info
+
+- name: example for using next_marker
+ amazon.aws.route53_info:
+ query: hosted_zone
+ next_marker: "{{ first_info.NextMarker }}"
+ max_items: 1
+ when: "{{ 'NextMarker' in first_info }}"
+
+- name: retrieve host entries starting with host1.workshop.test.io
+ block:
+ - name: grab zone id
+ amazon.aws.route53_zone:
+ zone: "test.io"
+ register: AWSINFO
+
+ - name: grab Route53 record information
+ amazon.aws.route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: "{{ AWSINFO.zone_id }}"
+ start_record_name: "host1.workshop.test.io"
+ register: RECORDS
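+
+# A hedged sketch (hypothetical zone ID): listing tags for a hosted zone
+# requires resource_id together with query=hosted_zone and hosted_zone_method=tags.
+- name: List tags for a hosted zone
+ amazon.aws.route53_info:
+ query: hosted_zone
+ hosted_zone_method: tags
+ resource_id: Z01234567AB1234567890
+ register: zone_tags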
+'''
+
+RETURN = r'''
+resource_record_sets:
+ description: A list of resource record sets returned by list_resource_record_sets in boto3.
+ returned: when I(query=record_sets)
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: The name of a record in the specified hosted zone.
+ type: str
+ sample: 'www.example.com'
+ type:
+ description: The DNS record type.
+ type: str
+ sample: 'A'
+ ttl:
+ description: The resource record cache time to live (TTL), in seconds.
+ type: int
+ sample: 60
+ set_identifier:
+ description: An identifier that differentiates among multiple resource record sets that have the same combination of name and type.
+ type: str
+ sample: 'abcd'
+ resource_records:
+ description: Information about the resource records.
+ type: list
+ elements: dict
+ contains:
+ value:
+ description: The current or new DNS record value.
+ type: str
+ sample: 'ns-12.awsdns-34.com.'
+ geo_location:
+ description: The geographic location for which Route53 responds to DNS queries, based on the origin of the query.
+ type: dict
+ elements: str
+ contains:
+ continent_code:
+ description: The two-letter code for the continent.
+ type: str
+ sample: 'NA'
+ country_code:
+ description: The two-letter code for a country.
+ type: str
+ sample: 'US'
+ subdivision_code:
+ description: The two-letter code for a state of the United States.
+ type: str
+ sample: 'NY'
+ version_added: 4.0.0
+ version_added_collection: community.aws
+hosted_zones:
+ description: A list of hosted zones returned by list_hosted_zones in boto3.
+ returned: when I(query=hosted_zone)
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: The ID of the hosted zone assigned by Amazon Route53 to the hosted zone at the creation time.
+ type: str
+ sample: '/hostedzone/Z01234567AB1234567890'
+ name:
+ description: The name of the domain.
+ type: str
+ sample: 'example.io'
+ resource_record_set_count:
+ description: The number of resource record sets in the hosted zone.
+ type: int
+ sample: 3
+ caller_reference:
+ description: The value specified for CallerReference at the time of hosted zone creation.
+ type: str
+ sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z'
+ config:
+ description: A dict that contains Comment and PrivateZone elements.
+ type: dict
+ contains:
+ comment:
+ description: Any comments included about the hosted zone.
+ type: str
+ sample: 'HostedZone created by Route53 Registrar'
+ private_zone:
+ description: A value that indicates whether this is a private hosted zone or not.
+ type: bool
+ sample: false
+ version_added: 4.0.0
+ version_added_collection: community.aws
+health_checks:
+ description: A list of Route53 health checks returned by list_health_checks in boto3.
+ type: list
+ elements: dict
+ returned: when I(query=health_check)
+ contains:
+ id:
+ description: The identifier that Amazon Route53 assigned to the health check at the time of creation.
+ type: str
+ sample: '12345cdc-2cc4-1234-bed2-123456abc1a2'
+ health_check_version:
+ description: The version of the health check.
+ type: str
+ sample: 1
+ caller_reference:
+ description: A unique string that you specified when you created the health check.
+ type: str
+ sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z'
+ health_check_config:
+ description: A dict that contains detailed information about one health check.
+ type: dict
+ contains:
+ disabled:
+ description: Whether Route53 should stop performing health checks on an endpoint.
+ type: bool
+ sample: false
+ enable_sni:
+ description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation.
+ type: bool
+ sample: true
+ failure_threshold:
+ description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change current status of endpoint.
+ type: int
+ sample: 3
+ fully_qualified_domain_name:
+ description: The fully qualified DNS name of the endpoint on which Route53 performs health checks.
+ type: str
+ sample: 'hello'
+ inverted:
+ description: Whether Route53 should invert the status of a health check.
+ type: bool
+ sample: false
+ ip_address:
+ description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on.
+ type: str
+ sample: 192.0.2.44
+ measure_latency:
+ description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint.
+ type: bool
+ sample: false
+ port:
+ description: The port of the endpoint that Route53 should perform health checks on.
+ type: int
+ sample: 80
+ request_interval:
+ description: The number of seconds between the time that Route53 gets a response from endpoint and the next health check request.
+ type: int
+ sample: 30
+ resource_path:
+ description: The path that Route53 requests when performing health checks.
+ type: str
+ sample: '/welcome.html'
+ search_string:
+ description: The string that Route53 uses to search for in the response body from specified resource.
+ type: str
+ sample: 'test-string-to-match'
+ type:
+ description: The type of the health check.
+ type: str
+ sample: HTTPS
+ version_added: 4.0.0
+ version_added_collection: community.aws
+checker_ip_ranges:
+ description: A list of IP ranges in CIDR format for Amazon Route 53 health checkers.
+ returned: when I(query=checker_ip_range)
+ type: list
+ elements: str
+ version_added: 4.1.0
+ version_added_collection: community.aws
+delegation_sets:
+ description: A list of dicts that contain information about the reusable delegation set.
+ returned: when I(query=reusable_delegation_set)
+ type: list
+ elements: dict
+ version_added: 4.1.0
+ version_added_collection: community.aws
+health_check:
+ description: A dict of Route53 health check details returned by get_health_check_status in boto3.
+ type: dict
+ returned: when I(query=health_check) and I(health_check_method=details)
+ contains:
+ id:
+ description: The identifier that Amazon Route53 assigned to the health check at the time of creation.
+ type: str
+ sample: '12345cdc-2cc4-1234-bed2-123456abc1a2'
+ health_check_version:
+ description: The version of the health check.
+ type: str
+ sample: 1
+ caller_reference:
+ description: A unique string that you specified when you created the health check.
+ type: str
+ sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z'
+ health_check_config:
+ description: A dict that contains detailed information about one health check.
+ type: dict
+ contains:
+ disabled:
+ description: Whether Route53 should stop performing health checks on an endpoint.
+ type: bool
+ sample: false
+ enable_sni:
+ description: Whether Route53 should send value of FullyQualifiedDomainName to endpoint in client_hello message during TLS negotiation.
+ type: bool
+ sample: true
+ failure_threshold:
+ description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change current status of endpoint.
+ type: int
+ sample: 3
+ fully_qualified_domain_name:
+ description: The fully qualified DNS name of the endpoint on which Route53 performs health checks.
+ type: str
+ sample: 'hello'
+ inverted:
+ description: Whether Route53 should invert the status of a health check.
+ type: bool
+ sample: false
+ ip_address:
+ description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on.
+ type: str
+ sample: 192.0.2.44
+ measure_latency:
+ description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint.
+ type: bool
+ sample: false
+ port:
+ description: The port of the endpoint that Route53 should perform health checks on.
+ type: int
+ sample: 80
+ request_interval:
+ description: The number of seconds between the time that Route53 gets a response from endpoint and the next health check request.
+ type: int
+ sample: 30
+ resource_path:
+ description: The path that Route53 requests when performing health checks.
+ type: str
+ sample: '/welcome.html'
+ search_string:
+ description: The string that Route53 uses to search for in the response body from specified resource.
+ type: str
+ sample: 'test-string-to-match'
+ type:
+ description: The type of the health check.
+ type: str
+ sample: HTTPS
+ version_added: 4.1.0
+ version_added_collection: community.aws
+ResourceRecordSets:
+ description: A deprecated CamelCased list of resource record sets returned by list_resource_record_sets in boto3.
+ This list contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ returned: when I(query=record_sets)
+ type: list
+ elements: dict
+HostedZones:
+ description: A deprecated CamelCased list of hosted zones returned by list_hosted_zones in boto3.
+ This list contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ returned: when I(query=hosted_zone)
+ type: list
+ elements: dict
+HealthChecks:
+ description: A deprecated CamelCased list of Route53 health checks returned by list_health_checks in boto3.
+ This list contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ type: list
+ elements: dict
+ returned: when I(query=health_check)
+CheckerIpRanges:
+ description: A deprecated CamelCased list of IP ranges in CIDR format for Amazon Route 53 health checkers.
+ This list contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ type: list
+ elements: str
+ returned: when I(query=checker_ip_range)
+DelegationSets:
+ description: A deprecated CamelCased list of dicts that contain information about the reusable delegation set.
+ This list contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ type: list
+ elements: dict
+ returned: when I(query=reusable_delegation_set)
+HealthCheck:
+ description: A deprecated CamelCased dict of Route53 health check details returned by get_health_check_status in boto3.
+ This dict contains the same elements/parameters as its snake_cased version mentioned above.
+ This field is deprecated and will be removed in release 6.0.0.
+ type: dict
+ returned: when I(query=health_check) and I(health_check_method=details)
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+# Split out paginator to allow for the backoff decorator to function
+@AWSRetry.jittered_backoff()
+def _paginated_result(paginator_name, **params):
+ paginator = client.get_paginator(paginator_name)
+ return paginator.paginate(**params).build_full_result()
+
+
+def get_hosted_zone():
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['Id'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ return client.get_hosted_zone(**params)
+
+
+def reusable_delegation_set_details():
+ params = dict()
+
+ if not module.params.get('delegation_set_id'):
+ if module.params.get('max_items'):
+ params['MaxItems'] = str(module.params.get('max_items'))
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ results = client.list_reusable_delegation_sets(**params)
+ else:
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+ results = client.get_reusable_delegation_set(**params)
+
+ results['delegation_sets'] = results['DelegationSets']
+ module.deprecate("The 'CamelCase' return values with key 'DelegationSets' is deprecated and \
+ will be replaced by 'snake_case' return values with key 'delegation_sets'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return results
+
+
+def list_hosted_zones():
+ params = dict()
+
+ # Set PaginationConfig with max_items
+ if module.params.get('max_items'):
+ params['PaginationConfig'] = dict(
+ MaxItems=module.params.get('max_items')
+ )
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ if module.params.get('delegation_set_id'):
+ params['DelegationSetId'] = module.params.get('delegation_set_id')
+
+ zones = _paginated_result('list_hosted_zones', **params)['HostedZones']
+ snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones]
+
+ module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \
+ will be replaced by 'snake_case' return values with key 'hosted_zones'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return {
+ "HostedZones": zones,
+ "list": zones,
+ "hosted_zones": snaked_zones,
+ }
+
+
+def list_hosted_zones_by_name():
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+
+ if module.params.get('dns_name'):
+ params['DNSName'] = module.params.get('dns_name')
+
+ if module.params.get('max_items'):
+ params['MaxItems'] = str(module.params.get('max_items'))
+
+ return client.list_hosted_zones_by_name(**params)
+
+
+def change_details():
+ params = dict()
+
+ if module.params.get('change_id'):
+ params['Id'] = module.params.get('change_id')
+ else:
+ module.fail_json(msg="change_id is required")
+
+ results = client.get_change(**params)
+ return results
+
+
+def checker_ip_range_details():
+ results = client.get_checker_ip_ranges()
+ results['checker_ip_ranges'] = results['CheckerIpRanges']
+ module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and \
+ will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return results
+
+
+def get_count():
+ if module.params.get('query') == 'health_check':
+ results = client.get_health_check_count()
+ else:
+ results = client.get_hosted_zone_count()
+
+ return results
+
+
+def get_health_check():
+ params = dict()
+
+ if not module.params.get('health_check_id'):
+ module.fail_json(msg="health_check_id is required")
+ else:
+ params['HealthCheckId'] = module.params.get('health_check_id')
+
+ if module.params.get('health_check_method') == 'details':
+ results = client.get_health_check(**params)
+ elif module.params.get('health_check_method') == 'failure_reason':
+ results = client.get_health_check_last_failure_reason(**params)
+ elif module.params.get('health_check_method') == 'status':
+ results = client.get_health_check_status(**params)
+
+ # Only the 'details' response includes a 'HealthCheck' key; 'status' and
+ # 'failure_reason' responses return 'HealthCheckObservations' instead.
+ if 'HealthCheck' in results:
+ results['health_check'] = camel_dict_to_snake_dict(results['HealthCheck'])
+ module.deprecate("The 'CamelCase' return values with key 'HealthCheck' is deprecated and \
+ will be replaced by 'snake_case' return values with key 'health_check'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return results
+
+
+def get_resource_tags():
+ params = dict()
+
+ if module.params.get('resource_id'):
+ params['ResourceIds'] = module.params.get('resource_id')
+ else:
+ module.fail_json(msg="resource_id or resource_ids is required")
+
+ if module.params.get('query') == 'health_check':
+ params['ResourceType'] = 'healthcheck'
+ else:
+ params['ResourceType'] = 'hostedzone'
+
+ return client.list_tags_for_resources(**params)
+
+
+def list_health_checks():
+ params = dict()
+
+ if module.params.get('next_marker'):
+ params['Marker'] = module.params.get('next_marker')
+
+ # Set PaginationConfig with max_items
+ if module.params.get('max_items'):
+ params['PaginationConfig'] = dict(
+ MaxItems=module.params.get('max_items')
+ )
+
+ health_checks = _paginated_result('list_health_checks', **params)['HealthChecks']
+ snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks]
+
+ module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \
+ will be replaced by 'snake_case' return values with key 'health_checks'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return {
+ "HealthChecks": health_checks,
+ "list": health_checks,
+ "health_checks": snaked_health_checks,
+ }
+
+
+def record_sets_details():
+ params = dict()
+
+ if module.params.get('hosted_zone_id'):
+ params['HostedZoneId'] = module.params.get('hosted_zone_id')
+ else:
+ module.fail_json(msg="Hosted Zone Id is required")
+
+ if module.params.get('start_record_name'):
+ params['StartRecordName'] = module.params.get('start_record_name')
+
+ # Check that both params are set if type is applied
+ if module.params.get('type') and not module.params.get('start_record_name'):
+ module.fail_json(msg="start_record_name must be specified if type is set")
+
+ if module.params.get('type'):
+ params['StartRecordType'] = module.params.get('type')
+
+ # Set PaginationConfig with max_items
+ if module.params.get('max_items'):
+ params['PaginationConfig'] = dict(
+ MaxItems=module.params.get('max_items')
+ )
+
+ record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets']
+ snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets]
+
+ module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \
+ will be replaced by 'snake_case' return values with key 'resource_record_sets'. \
+ Both case values are returned for now.",
+ date='2025-01-01', collection_name='amazon.aws')
+
+ return {
+ "ResourceRecordSets": record_sets,
+ "list": record_sets,
+ "resource_record_sets": snaked_record_sets,
+ }
+
+
+def health_check_details():
+ health_check_invocations = {
+ 'list': list_health_checks,
+ 'details': get_health_check,
+ 'status': get_health_check,
+ 'failure_reason': get_health_check,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = health_check_invocations[module.params.get('health_check_method')]()
+ return results
+
+
+def hosted_zone_details():
+ hosted_zone_invocations = {
+ 'details': get_hosted_zone,
+ 'list': list_hosted_zones,
+ 'list_by_name': list_hosted_zones_by_name,
+ 'count': get_count,
+ 'tags': get_resource_tags,
+ }
+
+ results = hosted_zone_invocations[module.params.get('hosted_zone_method')]()
+ return results
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ query=dict(choices=[
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ], required=True),
+ change_id=dict(),
+ hosted_zone_id=dict(),
+ max_items=dict(type='int'),
+ next_marker=dict(),
+ delegation_set_id=dict(),
+ start_record_name=dict(),
+ type=dict(type='str', choices=[
+ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS'
+ ]),
+ dns_name=dict(),
+ resource_id=dict(type='list', aliases=['resource_ids'], elements='str'),
+ health_check_id=dict(),
+ hosted_zone_method=dict(choices=[
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags'
+ ], default='list'),
+ health_check_method=dict(choices=[
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ], default='list'),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['hosted_zone_method', 'health_check_method'],
+ ],
+ check_boto3=False,
+ )
+
+ try:
+ client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ invocations = {
+ 'change': change_details,
+ 'checker_ip_range': checker_ip_range_details,
+ 'health_check': health_check_details,
+ 'hosted_zone': hosted_zone_details,
+ 'record_sets': record_sets_details,
+ 'reusable_delegation_set': reusable_delegation_set_details,
+ }
+
+ results = dict(changed=False)
+ try:
+ results = invocations[module.params.get('query')]()
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py
new file mode 100644
index 00000000..2cc97980
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: route53_zone
+short_description: Add or delete Route53 zones
+version_added: 5.0.0
+description:
+ - Creates and deletes Route53 private and public zones.
+ - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+ zone:
+ description:
+ - "The DNS zone record (eg: foo.com.)"
+ required: true
+ type: str
+ state:
+ description:
+ - Whether or not the zone should exist.
+ default: present
+ choices: [ "present", "absent" ]
+ type: str
+ vpc_id:
+ description:
+ - The VPC ID the zone should be a part of (if this is going to be a private zone).
+ type: str
+ vpc_region:
+ description:
+ - The VPC Region the zone should be a part of (if this is going to be a private zone).
+ type: str
+ comment:
+ description:
+ - Comment associated with the zone.
+ default: ''
+ type: str
+ hosted_zone_id:
+ description:
+ - The unique zone identifier you want to delete or "all" if there are many zones with the same domain name.
+ - Required if there are multiple zones identified with the above options.
+ type: str
+ delegation_set_id:
+ description:
+ - The reusable delegation set ID to be associated with the zone.
+ - Note that you can't associate a reusable delegation set with a private hosted zone.
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
+author:
+ - "Christopher Troup (@minichate)"
+'''
+
+EXAMPLES = r'''
+- name: create a public zone
+ amazon.aws.route53_zone:
+ zone: example.com
+ comment: this is an example
+
+- name: delete a public zone
+ amazon.aws.route53_zone:
+ zone: example.com
+ state: absent
+
+- name: create a private zone
+ amazon.aws.route53_zone:
+ zone: devel.example.com
+ vpc_id: '{{ myvpc_id }}'
+ vpc_region: us-west-2
+ comment: developer domain
+
+- name: create a public zone associated with a specific reusable delegation set
+ amazon.aws.route53_zone:
+ zone: example.com
+ comment: reusable delegation set example
+ delegation_set_id: A1BCDEF2GHIJKL
+
+- name: create a public zone with tags
+ amazon.aws.route53_zone:
+ zone: example.com
+ comment: this is an example
+ tags:
+ Owner: Ansible Team
+
+- name: modify a public zone, removing all previous tags and adding a new one
+ amazon.aws.route53_zone:
+ zone: example.com
+ comment: this is an example
+ tags:
+ Support: Ansible Community
+ purge_tags: true
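+
+# A hedged sketch (hypothetical zone ID): when several zones share the same
+# name, hosted_zone_id selects the specific zone to delete.
+- name: delete one of several zones with the same name, selected by zone ID
+ amazon.aws.route53_zone:
+ zone: example.com
+ hosted_zone_id: Z01234567AB1234567890
+ state: absent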
+'''
+
+RETURN = r'''
+comment:
+ description: optional hosted zone comment
+ returned: when hosted zone exists
+ type: str
+ sample: "Private zone"
+name:
+ description: hosted zone name
+ returned: when hosted zone exists
+ type: str
+ sample: "private.local."
+private_zone:
+ description: whether hosted zone is private or public
+ returned: when hosted zone exists
+ type: bool
+ sample: true
+vpc_id:
+ description: id of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: str
+ sample: "vpc-1d36c84f"
+vpc_region:
+ description: region of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: str
+ sample: "eu-west-1"
+zone_id:
+ description: hosted zone id
+ returned: when hosted zone exists
+ type: str
+ sample: "Z6JQG9820BEFMW"
+delegation_set_id:
+ description: id of the associated reusable delegation set
+ returned: for public hosted zones, if they have been associated with a reusable delegation set
+ type: str
+ sample: "A1BCDEF2GHIJKL"
+tags:
+ description: tags associated with the zone
+ returned: when tags are defined
+ type: dict
+'''
+
+import time
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+@AWSRetry.jittered_backoff()
+def _list_zones():
+ paginator = client.get_paginator('list_hosted_zones')
+ return paginator.paginate().build_full_result()
+
+
+def find_zones(zone_in, private_zone):
+ try:
+ results = _list_zones()
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not list current hosted zones")
+ zones = []
+ for r53zone in results['HostedZones']:
+ if r53zone['Name'] != zone_in:
+ continue
+ # only save zone names that match the public/private setting
+ if (r53zone['Config']['PrivateZone'] and private_zone) or \
+ (not r53zone['Config']['PrivateZone'] and not private_zone):
+ zones.append(r53zone)
+
+ return zones
+
+
+def create(matching_zones):
+ zone_in = module.params.get('zone').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ comment = module.params.get('comment')
+ delegation_set_id = module.params.get('delegation_set_id')
+ tags = module.params.get('tags')
+ purge_tags = module.params.get('purge_tags')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ record = {
+ 'private_zone': private_zone,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'comment': comment,
+ 'name': zone_in,
+ 'delegation_set_id': delegation_set_id,
+ 'zone_id': None,
+ }
+
+ if private_zone:
+ changed, result = create_or_update_private(matching_zones, record)
+ else:
+ changed, result = create_or_update_public(matching_zones, record)
+
+ zone_id = result.get('zone_id')
+ if zone_id:
+ if tags is not None:
+ changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags)
+ result['tags'] = get_tags(module, client, 'hostedzone', zone_id)
+ else:
+ result['tags'] = tags
+
+ return changed, result
+
+
+def create_or_update_private(matching_zones, record):
+ for z in matching_zones:
+ try:
+ result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
+ zone_details = result['HostedZone']
+ vpc_details = result['VPCs']
+ current_vpc_id = None
+ current_vpc_region = None
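+ # vpc_details may be a single mapping or a list of mappings depending on
+ # how many VPCs are associated with the zone; handle both shapes.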
+ if isinstance(vpc_details, dict):
+ if vpc_details['VPC']['VPCId'] == record['vpc_id']:
+ current_vpc_id = vpc_details['VPC']['VPCId']
+ current_vpc_region = vpc_details['VPC']['VPCRegion']
+ else:
+ if record['vpc_id'] in [v['VPCId'] for v in vpc_details]:
+ current_vpc_id = record['vpc_id']
+ if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]:
+ current_vpc_region = record['vpc_region']
+
+ if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
+ if not module.check_mode:
+ try:
+ client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
+ return True, record
+ else:
+ record['msg'] = "There is already a private hosted zone in the same region with the same VPC \
+ you chose. Unable to create a new private hosted zone in the same name space."
+ return False, record
+
+ if not module.check_mode:
+ try:
+ result = client.create_hosted_zone(
+ Name=record['name'],
+ HostedZoneConfig={
+ 'Comment': record['comment'] if record['comment'] is not None else "",
+ 'PrivateZone': True,
+ },
+ VPC={
+ 'VPCRegion': record['vpc_region'],
+ 'VPCId': record['vpc_id'],
+ },
+ CallerReference="%s-%s" % (record['name'], time.time()),
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not create hosted zone")
+
+ hosted_zone = result['HostedZone']
+ zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
+ record['zone_id'] = zone_id
+
+ changed = True
+ return changed, record
+
+
+def create_or_update_public(matching_zones, record):
+ zone_details, zone_delegation_set_details = None, {}
+ for matching_zone in matching_zones:
+ try:
+ zone = client.get_hosted_zone(Id=matching_zone['Id'])
+ zone_details = zone['HostedZone']
+ zone_delegation_set_details = zone.get('DelegationSet', {})
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id'])
+ if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
+ if not module.check_mode:
+ try:
+ client.update_hosted_zone_comment(
+ Id=zone_details['Id'],
+ Comment=record['comment']
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
+ changed = True
+ else:
+ changed = False
+ break
+
+ if zone_details is None:
+ if not module.check_mode:
+ try:
+ params = dict(
+ Name=record['name'],
+ HostedZoneConfig={
+ 'Comment': record['comment'] if record['comment'] is not None else "",
+ 'PrivateZone': False,
+ },
+ CallerReference="%s-%s" % (record['name'], time.time()),
+ )
+
+ if record.get('delegation_set_id') is not None:
+ params['DelegationSetId'] = record['delegation_set_id']
+
+ result = client.create_hosted_zone(**params)
+ zone_details = result['HostedZone']
+ zone_delegation_set_details = result.get('DelegationSet', {})
+
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not create hosted zone")
+ changed = True
+
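+ # In check mode no zone is actually created, so zone details may be unavailable.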
+ if module.check_mode:
+ if zone_details:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ else:
+ record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
+ record['name'] = zone_details['Name']
+ record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '')
+
+ return changed, record
+
+
+def delete_private(matching_zones, vpc_id, vpc_region):
+ for z in matching_zones:
+ try:
+ result = client.get_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
+ zone_details = result['HostedZone']
+ vpc_details = result['VPCs']
+ if isinstance(vpc_details, dict):
+ if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ return True, "Successfully deleted %s" % zone_details['Name']
+ else:
+ if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ return True, "Successfully deleted %s" % zone_details['Name']
+
+ return False, "The vpc_id and the vpc_region do not match a private hosted zone."
+
+
+def delete_public(matching_zones):
+ if len(matching_zones) > 1:
+ changed = False
+ msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
+ else:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=matching_zones[0]['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id'])
+ changed = True
+ msg = "Successfully deleted %s" % matching_zones[0]['Id']
+ return changed, msg
+
+
+def delete_hosted_id(hosted_zone_id, matching_zones):
+ if hosted_zone_id == "all":
+ deleted = []
+ for z in matching_zones:
+ deleted.append(z['Id'])
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=z['Id'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+ changed = True
+ msg = "Successfully deleted zones: %s" % deleted
+ elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
+ if not module.check_mode:
+ try:
+ client.delete_hosted_zone(Id=hosted_zone_id)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
+ changed = True
+ msg = "Successfully deleted zone: %s" % hosted_zone_id
+ else:
+ changed = False
+ msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
+ return changed, msg
+
+
+def delete(matching_zones):
+ zone_in = module.params.get('zone').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ hosted_zone_id = module.params.get('hosted_zone_id')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ if zone_in in [z['Name'] for z in matching_zones]:
+ if hosted_zone_id:
+ changed, result = delete_hosted_id(hosted_zone_id, matching_zones)
+ else:
+ if private_zone:
+ changed, result = delete_private(matching_zones, vpc_id, vpc_region)
+ else:
+ changed, result = delete_public(matching_zones)
+ else:
+ changed = False
+ result = "No zone to delete."
+
+ return changed, result
+
+
+def main():
+ global module
+ global client
+
+ argument_spec = dict(
+ zone=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ vpc_id=dict(default=None),
+ vpc_region=dict(default=None),
+ comment=dict(default=''),
+ hosted_zone_id=dict(),
+ delegation_set_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ )
+
+ mutually_exclusive = [
+ ['delegation_set_id', 'vpc_id'],
+ ['delegation_set_id', 'vpc_region'],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+
+ zone_in = module.params.get('zone').lower()
+ state = module.params.get('state').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+
+ if not zone_in.endswith('.'):
+ zone_in += "."
+
+ private_zone = bool(vpc_id and vpc_region)
+
+ client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff())
+
+ zones = find_zones(zone_in, private_zone)
+ if state == 'present':
+ changed, result = create(matching_zones=zones)
+ elif state == 'absent':
+ changed, result = delete(matching_zones=zones)
+
+ if isinstance(result, dict):
+ module.exit_json(changed=changed, result=result, **result)
+ else:
+ module.exit_json(changed=changed, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
new file mode 100644
index 00000000..8a09858c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
@@ -0,0 +1,1184 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_bucket
+version_added: 1.0.0
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+ - Manage S3 buckets.
+ - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+ - When using non-AWS services, I(endpoint_url) should be specified.
+author:
+ - Rob White (@wimnat)
+ - Aubin Bikouo (@abikouo)
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys (including versions and delete markers)
+ in the bucket first (an S3 bucket must be empty for a successful deletion).
+ type: bool
+ default: false
+ name:
+ description:
+ - Name of the S3 bucket.
+ required: true
+ type: str
+ policy:
+ description:
+ - The JSON policy as a string. Set to the string C("null") to force the absence of a policy.
+ type: json
+ ceph:
+ description:
+ - Enable API compatibility with Ceph RGW.
+ - It takes into account the S3 API subset that works with Ceph in order to provide the same
+ module behaviour where possible.
+ - Requires I(endpoint_url) if I(ceph=true).
+ aliases: ['rgw']
+ type: bool
+ default: false
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+ of the request and the data download from the bucket.
+ type: bool
+ state:
+ description:
+ - Create or remove the S3 bucket.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended).
+ type: bool
+ encryption:
+ description:
+ - Describes the default server-side encryption to apply to new objects in the bucket.
+ To remove server-side encryption, I(encryption) must be set to C(none) explicitly.
+ choices: [ 'none', 'AES256', 'aws:kms' ]
+ type: str
+ encryption_key_id:
+ description: KMS master key ID to use for the default encryption. Only allowed when I(encryption) is C(aws:kms). If
+ not specified then it defaults to the AWS-provided KMS key.
+ type: str
+ bucket_key_enabled:
+ description:
+ - Enable S3 Bucket Keys for SSE-KMS on new objects.
+ - See the AWS documentation for more information
+ U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html).
+ - Bucket Key encryption is only supported if I(encryption=aws:kms).
+ required: false
+ type: bool
+ version_added: 4.1.0
+ public_access:
+ description:
+ - Configure public access block for S3 bucket.
+ - This option cannot be used together with I(delete_public_access).
+ suboptions:
+ block_public_acls:
+ description: Sets BlockPublicAcls value.
+ type: bool
+ default: False
+ block_public_policy:
+ description: Sets BlockPublicPolicy value.
+ type: bool
+ default: False
+ ignore_public_acls:
+ description: Sets IgnorePublicAcls value.
+ type: bool
+ default: False
+ restrict_public_buckets:
+ description: Sets RestrictPublicBuckets value.
+ type: bool
+ default: False
+ type: dict
+ version_added: 1.3.0
+ delete_public_access:
+ description:
+ - Delete public access block configuration from bucket.
+ - This option cannot be used together with a I(public_access) definition.
+ default: false
+ type: bool
+ version_added: 1.3.0
+ object_ownership:
+ description:
+ - Manage the bucket's ownership controls.
+ - C(BucketOwnerEnforced) - ACLs are disabled and no longer affect access permissions to your
+ bucket. Requests to set or update ACLs fail. However, requests to read ACLs are supported.
+ Bucket owner has full ownership and control. Object writer no longer has full ownership and
+ control.
+ - C(BucketOwnerPreferred) - Objects uploaded to the bucket change ownership to the bucket owner
+ if the objects are uploaded with the bucket-owner-full-control canned ACL.
+ - C(ObjectWriter) - The uploading account will own the object
+ if the object is uploaded with the bucket-owner-full-control canned ACL.
+ - This option cannot be used together with a I(delete_object_ownership) definition.
+ - C(BucketOwnerEnforced) has been added in version 3.2.0.
+ choices: [ 'BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter' ]
+ type: str
+ version_added: 2.0.0
+ delete_object_ownership:
+ description:
+ - Delete bucket's ownership controls.
+ - This option cannot be used together with a I(object_ownership) definition.
+ default: false
+ type: bool
+ version_added: 2.0.0
+ acl:
+ description:
+ - The canned ACL to apply to the bucket.
+ - If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+ ACLs are disabled and no longer affect permissions.
+ choices: [ 'private', 'public-read', 'public-read-write', 'authenticated-read' ]
+ type: str
+ version_added: 3.1.0
+ validate_bucket_name:
+ description:
+ - Whether the bucket name should be validated to conform to AWS S3 naming rules.
+ - Enabled by default; may be disabled for S3 backends that do not enforce these rules.
+ - See U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
+ type: bool
+ version_added: 3.1.0
+ default: True
+
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+
+notes:
+ - If the C(requestPayment), C(policy), C(tagging) or C(versioning)
+ operations/API aren't implemented by the endpoint, the module doesn't fail
+ as long as I(requester_pays) is C(false) and I(policy), I(tags) and
+ I(versioning) are C(None).
+ - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url)
+ parameter; I(s3_url) remains as an alias for I(endpoint_url).
+ - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with
+ neither scheme nor path.
+ - Support for the C(S3_URL) environment variable has been deprecated and will
+ be removed in a release after 2024-12-01; please use the I(endpoint_url)
+ parameter or the C(AWS_URL) environment variable instead.
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple S3 bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+
+# Create a simple S3 bucket on Ceph Rados Gateway
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ endpoint_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an S3 bucket and any keys it contains
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: true
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: true
+ versioning: true
+ tags:
+ example: tag1
+ another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- amazon.aws.s3_bucket:
+ name: mydobucket
+ endpoint_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+ encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, Bucket key
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ bucket_key_enabled: true
+ encryption: "aws:kms"
+
+# Create a bucket with aws:kms encryption, default key
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+
+# Create a bucket with public policy block configuration
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ public_access:
+ block_public_acls: true
+ ignore_public_acls: true
+ ## Keys set to 'false' can be omitted; undefined keys default to 'false'
+ # block_public_policy: false
+ # restrict_public_buckets: false
+
+# Delete public policy block from bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ delete_public_access: true
+
+# Create a bucket with object ownership controls set to ObjectWriter
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ object_ownership: ObjectWriter
+
+# Delete ownership controls from bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ delete_object_ownership: true
+
+# Delete a bucket policy from bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ policy: "null"
+
+# This example grants public-read to everyone on bucket using ACL
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ acl: public-read
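+
+# Hypothetical non-AWS backend: the endpoint URL below is an example value.
+# Backends that do not enforce AWS naming rules can skip bucket name validation.
+- amazon.aws.s3_bucket:
+ name: my_backend_bucket
+ endpoint_url: 'https://s3.example.internal'
+ validate_bucket_name: false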
+'''
+
+RETURN = r'''
+encryption:
+ description:
+ - Server-side encryption of the objects in the S3 bucket.
+ type: str
+ returned: I(state=present)
+ sample: ''
+name:
+ description: Name of the S3 bucket.
+ type: str
+ returned: I(state=present)
+ sample: "2d3ce10a8210d36d6b4d23b822892074complex"
+object_ownership:
+ description: S3 bucket's ownership controls.
+ type: str
+ returned: I(state=present)
+ sample: "BucketOwnerPreferred"
+policy:
+ description: S3 bucket's policy.
+ type: dict
+ returned: I(state=present)
+ sample: {
+ "Statement": [
+ {
+ "Action": "s3:GetObject",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Resource": "arn:aws:s3:::2d3ce10a8210d36d6b4d23b822892074complex/*",
+ "Sid": "AddPerm"
+ }
+ ],
+ "Version": "2012-10-17"
+ }
+requester_pays:
+ description:
+ - Indicates that the requester was successfully charged for the request.
+ type: str
+ returned: I(state=present)
+ sample: ''
+tags:
+ description: S3 bucket's tags.
+ type: dict
+ returned: I(state=present)
+ sample: {
+ "Tag1": "tag1",
+ "Tag2": "tag2"
+ }
+versioning:
+ description: S3 bucket's versioning configuration.
+ type: dict
+ returned: I(state=present)
+ sample: {
+ "MfaDelete": "Disabled",
+ "Versioning": "Enabled"
+ }
+acl:
+ description: S3 bucket's canned ACL.
+ type: dict
+ returned: I(state=present)
+ sample: 'public-read'
+'''
+
+import json
+import os
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name
+
+
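+# Reconcile each supported bucket property in turn (versioning, request payment,
+# policy, tags, encryption, public access block, ownership controls, ACL),
+# accumulating a "changed" flag and the resulting state.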
+def create_or_update_bucket(s3_client, module, location):
+
+ policy = module.params.get("policy")
+ name = module.params.get("name")
+ requester_pays = module.params.get("requester_pays")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ versioning = module.params.get("versioning")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ bucket_key_enabled = module.params.get("bucket_key_enabled")
+ public_access = module.params.get("public_access")
+ delete_public_access = module.params.get("delete_public_access")
+ delete_object_ownership = module.params.get("delete_object_ownership")
+ object_ownership = module.params.get("object_ownership")
+ acl = module.params.get("acl")
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location)
+ s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+ changed = changed or bucket_changed
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
+ # Versioning
+ try:
+ versioning_status = get_bucket_versioning(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if versioning is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket versioning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket versioning")
+ else:
+ if versioning is not None:
+ required_versioning = None
+ if versioning and versioning_status.get('Status') != "Enabled":
+ required_versioning = 'Enabled'
+ elif not versioning and versioning_status.get('Status') == "Enabled":
+ required_versioning = 'Suspended'
+
+ if required_versioning:
+ try:
+ put_bucket_versioning(s3_client, name, required_versioning)
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+ versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+ # This output format is there to ensure compatibility with previous versions of the module
+ result['versioning'] = {
+ 'Versioning': versioning_status.get('Status', 'Disabled'),
+ 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+ }
+
+ # Requester pays
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if requester_pays is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ else:
+ if requester_pays is not None:
+ payer = 'Requester' if requester_pays else 'BucketOwner'
+ if requester_pays_status != payer:
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+ if requester_pays_status is None:
+ # The put request is frequently not applied immediately, so retry one more time
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+ changed = True
+
+ result['requester_pays'] = requester_pays
+
+ # Policy
+ try:
+ current_policy = get_bucket_policy(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if policy is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ else:
+ if policy is not None:
+ if isinstance(policy, string_types):
+ policy = json.loads(policy)
+
+ if not policy and current_policy:
+ try:
+ delete_bucket_policy(s3_client, name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+ changed = True
+ elif compare_policies(current_policy, policy):
+ try:
+ put_bucket_policy(s3_client, name, policy)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+ if current_policy is None:
+ # As with request payment, the put request is frequently not applied immediately, so retry once more
+ put_bucket_policy(s3_client, name, policy)
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+ changed = True
+
+ result['policy'] = current_policy
+
+ # Tags
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if tags is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket tags")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket tags")
+ else:
+ if tags is not None:
+ # Tags are always returned as text
+ tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+ if not purge_tags:
+ # Ensure existing tags that aren't updated by desired tags remain
+ current_copy = current_tags_dict.copy()
+ current_copy.update(tags)
+ tags = current_copy
+ if current_tags_dict != tags:
+ if tags:
+ try:
+ put_bucket_tagging(s3_client, name, tags)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket tags")
+ else:
+ if purge_tags:
+ try:
+ delete_bucket_tagging(s3_client, name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket tags")
+ current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+ changed = True
+
+ result['tags'] = current_tags_dict
+
+ # Encryption
+ try:
+ current_encryption = get_bucket_encryption(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if encryption is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket encryption settings")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket encryption settings")
+ else:
+ if encryption is not None:
+ current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+ current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+ if encryption == 'none':
+ if current_encryption_algorithm is not None:
+ try:
+ delete_bucket_encryption(s3_client, name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+ changed = True
+ else:
+ if (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id):
+ expected_encryption = {'SSEAlgorithm': encryption}
+ if encryption == 'aws:kms' and encryption_key_id is not None:
+ expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+ current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
+ changed = True
+
+ if bucket_key_enabled is not None:
+ current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+ if current_encryption_algorithm == 'aws:kms':
+ if get_bucket_key(s3_client, name) != bucket_key_enabled:
+ expected_encryption = bool(bucket_key_enabled)
+ current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption)
+ changed = True
+ result['encryption'] = current_encryption
+ # Public access block configuration
+ current_public_access = {}
+
+ try:
+ current_public_access = get_bucket_public_access(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if public_access is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket public access configuration")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket public access configuration")
+ else:
+ # -- Create / Update public access block
+ if public_access is not None:
+ camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
+
+ if current_public_access == camel_public_block:
+ result['public_access_block'] = current_public_access
+ else:
+ put_bucket_public_access(s3_client, name, camel_public_block)
+ changed = True
+ result['public_access_block'] = camel_public_block
+
+ # -- Delete public access block
+ if delete_public_access:
+ if current_public_access == {}:
+ result['public_access_block'] = current_public_access
+ else:
+ delete_bucket_public_access(s3_client, name)
+ changed = True
+ result['public_access_block'] = {}
+
+ # -- Bucket ownership
+ try:
+ bucket_ownership = get_bucket_ownership_cntrl(s3_client, name)
+ result['object_ownership'] = bucket_ownership
+ except KeyError as e:
+ # Some non-AWS providers appear to return policy documents that aren't
+ # compatible with AWS; catch KeyError cleanly so users can continue to use
+ # other features.
+ if delete_object_ownership or object_ownership is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ if delete_object_ownership or object_ownership is not None:
+ module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+ else:
+ if delete_object_ownership:
+ # delete S3 bucket ownership
+ if bucket_ownership is not None:
+ delete_bucket_ownership(s3_client, name)
+ changed = True
+ result['object_ownership'] = None
+ elif object_ownership is not None:
+ # update S3 bucket ownership
+ if bucket_ownership != object_ownership:
+ put_bucket_ownership(s3_client, name, object_ownership)
+ changed = True
+ result['object_ownership'] = object_ownership
+
+ # -- Bucket ACL
+ if acl:
+ try:
+ s3_client.put_bucket_acl(Bucket=name, ACL=acl)
+ result['acl'] = acl
+ changed = True
+ except KeyError as e:
+ # Some non-AWS providers appear to return policy documents that aren't
+ # compatible with AWS; catch KeyError cleanly so users can continue to use
+ # other features.
+ module.fail_json_aws(e, msg="Failed to get bucket acl block")
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket ACL")
+ except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Access denied trying to update bucket ACL")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to update bucket ACL")
+
+ # Module exit
+ module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+ try:
+ s3_client.head_bucket(Bucket=bucket_name)
+ exists = True
+ except is_boto3_error_code('404'):
+ exists = False
+ return exists
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+ try:
+ configuration = {}
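+ # AWS rejects an explicit LocationConstraint of us-east-1, so only set one for other regions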
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ if configuration:
+ s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+ else:
+ s3_client.create_bucket(Bucket=bucket_name)
+ return True
+ except is_boto3_error_code('BucketAlreadyOwnedByYou'):
+ # We should never get here since bucket presence is checked before create_or_update_bucket
+ # is called. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
+ return False
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+ s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+ s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_policy(s3_client, bucket_name):
+ s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_policy(s3_client, bucket_name):
+ try:
+ current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+ except is_boto3_error_code('NoSuchBucketPolicy'):
+ return None
+
+ return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+ s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_request_payment(s3_client, bucket_name):
+ return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_versioning(s3_client, bucket_name):
+ return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+ s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_encryption(s3_client, bucket_name):
+ try:
+ result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+ except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+ return None
+ except (IndexError, KeyError):
+ return None
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_key(s3_client, bucket_name):
+ try:
+ result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('BucketKeyEnabled')
+ except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+ return None
+ except (IndexError, KeyError):
+ return None
+
+
+def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+ max_retries = 3
+ for retries in range(1, max_retries + 1):
+ try:
+ put_bucket_encryption(s3_client, name, expected_encryption)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to set bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
+ should_fail=(retries == max_retries), retries=5)
+ if current_encryption == expected_encryption:
+ return current_encryption
+
+ # We shouldn't get here: that requires current_encryption != expected_encryption
+ # with retries == max_retries, in which case wait_encryption_is_applied() has
+ # already called module.fail_json() and failed out.
+ module.fail_json(msg='Failed to apply bucket encryption',
+ current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+ server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+ s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
+ max_retries = 3
+ for retries in range(1, max_retries + 1):
+ try:
+ put_bucket_key(s3_client, name, expected_encryption)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to set bucket Key")
+ current_encryption = wait_bucket_key_is_applied(module, s3_client, name, expected_encryption,
+ should_fail=(retries == max_retries), retries=5)
+ if current_encryption == expected_encryption:
+ return current_encryption
+
+ # We shouldn't get here: that requires current_encryption != expected_encryption
+ # with retries == max_retries, in which case wait_bucket_key_is_applied() has
+ # already called module.fail_json() and failed out.
+ module.fail_json(msg='Failed to set bucket key',
+ current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_key(s3_client, bucket_name, encryption):
+ # Fetch the current encryption configuration and toggle only the BucketKeyEnabled flag
+ encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ encryption_status['ServerSideEncryptionConfiguration']['Rules'][0]['BucketKeyEnabled'] = encryption
+ s3_client.put_bucket_encryption(
+ Bucket=bucket_name,
+ ServerSideEncryptionConfiguration=encryption_status[
+ 'ServerSideEncryptionConfiguration']
+ )
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_tagging(s3_client, bucket_name):
+ s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_encryption(s3_client, bucket_name):
+ s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
+def delete_bucket(s3_client, bucket_name):
+ try:
+ s3_client.delete_bucket(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchBucket'):
+ # The bucket was presumably already being deleted when we checked its existence,
+ # so just ignore the error
+ pass
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_public_access(s3_client, bucket_name, public_access):
+ '''
+ Put a new public access block on an S3 bucket
+ '''
+ s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_access)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_public_access(s3_client, bucket_name):
+ '''
+ Delete public access block from S3 bucket
+ '''
+ s3_client.delete_public_access_block(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_ownership(s3_client, bucket_name):
+ '''
+ Delete bucket ownership controls from S3 bucket
+ '''
+ s3_client.delete_bucket_ownership_controls(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_ownership(s3_client, bucket_name, target):
+ '''
+ Put bucket ownership controls for S3 bucket
+ '''
+ s3_client.put_bucket_ownership_controls(
+ Bucket=bucket_name,
+ OwnershipControls={
+ 'Rules': [{'ObjectOwnership': target}]
+ })
+
+
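+# The wait_* helpers below poll until S3's eventually consistent APIs report
+# the requested state, sleeping between attempts before giving up.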
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ current_policy = get_bucket_policy(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+ if compare_policies(current_policy, expected_policy):
+ time.sleep(5)
+ else:
+ return current_policy
+ if should_fail:
+ module.fail_json(msg="Bucket policy failed to apply in the expected time",
+ requested_policy=expected_policy, live_policy=current_policy)
+ else:
+ return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ if requester_pays_status != expected_payer:
+ time.sleep(5)
+ else:
+ return requester_pays_status
+ if should_fail:
+ module.fail_json(msg="Bucket request payment failed to apply in the expected time",
+ requested_status=expected_payer, live_status=requester_pays_status)
+ else:
+ return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+ for dummy in range(0, retries):
+ try:
+ encryption = get_bucket_encryption(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+ if encryption != expected_encryption:
+ time.sleep(5)
+ else:
+ return encryption
+
+ if should_fail:
+ module.fail_json(msg="Bucket encryption failed to apply in the expected time",
+ requested_encryption=expected_encryption, live_encryption=encryption)
+
+ return encryption
+
+
+def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+ for dummy in range(0, retries):
+ try:
+ encryption = get_bucket_key(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+ if encryption != expected_encryption:
+ time.sleep(5)
+ else:
+ return encryption
+
+ if should_fail:
+ module.fail_json(msg="Bucket Key failed to apply in the expected time",
+ requested_encryption=expected_encryption, live_encryption=encryption)
+ return encryption
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+ for dummy in range(0, 24):
+ try:
+ versioning_status = get_bucket_versioning(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+ if versioning_status.get('Status') != required_versioning:
+ time.sleep(8)
+ else:
+ return versioning_status
+ module.fail_json(msg="Bucket versioning failed to apply in the expected time",
+ requested_versioning=required_versioning, live_versioning=versioning_status)
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+ for dummy in range(0, 12):
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ if current_tags_dict != expected_tags_dict:
+ time.sleep(5)
+ else:
+ return current_tags_dict
+ module.fail_json(msg="Bucket tags failed to apply in the expected time",
+ requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+ try:
+ current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+ except is_boto3_error_code('NoSuchTagSet'):
+ return {}
+ # The Ceph S3 API returns a different error code to AWS
+ except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except
+ return {}
+
+ return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+def get_bucket_public_access(s3_client, bucket_name):
+ '''
+ Get current bucket public access block
+ '''
+ try:
+ bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
+ return bucket_public_access_block['PublicAccessBlockConfiguration']
+ except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'):
+ return {}
+
+
+def get_bucket_ownership_cntrl(s3_client, bucket_name):
+ '''
+ Get current bucket ownership controls
+ '''
+ try:
+ bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name)
+ return bucket_ownership['OwnershipControls']['Rules'][0]['ObjectOwnership']
+ except is_boto3_error_code(['OwnershipControlsNotFoundError', 'NoSuchOwnershipControls']):
+ return None
+
+
+def paginated_list(s3_client, **pagination_params):
+ pg = s3_client.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+ try:
+ pg = s3_client.get_paginator('list_object_versions')
+ for page in pg.paginate(**pagination_params):
+ # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
+ yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+ except is_boto3_error_code('NoSuchBucket'):
+ yield []
+
+
+def destroy_bucket(s3_client, module):
+
+ force = module.params.get("force")
+ name = module.params.get("name")
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ module.exit_json(changed=False)
+
+ if force:
+ # if there are contents then we need to delete them (including versions) before we can delete the bucket
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get('VersionId'):
+ fk.pop('VersionId')
+
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+ if resp.get('Errors'):
+ module.fail_json(
+ msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
+ ', '.join([k['Key'] for k in resp['Errors']])
+ ),
+ errors=resp['Errors'], response=resp
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+ try:
+ delete_bucket(s3_client, name)
+ s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket")
+
+ module.exit_json(changed=True)
+
+
+def is_fakes3(endpoint_url):
+ """ Return True if endpoint_url has scheme fakes3:// """
+ if endpoint_url is not None:
+ return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
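+# Build the boto3 S3 client. Ceph RGW and fakes3-style endpoints need their
+# scheme and port translated first (e.g. a hypothetical fakes3://localhost:4567
+# becomes http://localhost:4567).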
+def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url):
+ if ceph: # TODO - test this
+ ceph = urlparse(endpoint_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+ region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ elif is_fakes3(endpoint_url):
+ fakes3 = urlparse(endpoint_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ return boto3_conn(**params)
+
+
+def main():
+
+ argument_spec = dict(
+ force=dict(default=False, type='bool'),
+ policy=dict(type='json'),
+ name=dict(required=True),
+ requester_pays=dict(type='bool'),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ versioning=dict(type='bool'),
+ ceph=dict(default=False, type='bool', aliases=['rgw']),
+ encryption=dict(choices=['none', 'AES256', 'aws:kms']),
+ encryption_key_id=dict(),
+ bucket_key_enabled=dict(type='bool'),
+ public_access=dict(type='dict', options=dict(
+ block_public_acls=dict(type='bool', default=False),
+ ignore_public_acls=dict(type='bool', default=False),
+ block_public_policy=dict(type='bool', default=False),
+ restrict_public_buckets=dict(type='bool', default=False))),
+ delete_public_access=dict(type='bool', default=False),
+ object_ownership=dict(type='str', choices=['BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter']),
+ delete_object_ownership=dict(type='bool', default=False),
+ acl=dict(type='str', choices=['private', 'public-read', 'public-read-write', 'authenticated-read']),
+ validate_bucket_name=dict(type='bool', default=True),
+ )
+
+ required_by = dict(
+ encryption_key_id=('encryption',),
+ )
+
+ mutually_exclusive = [
+ ['public_access', 'delete_public_access'],
+ ['delete_object_ownership', 'object_ownership']
+ ]
+
+ required_if = [
+ ['ceph', True, ['endpoint_url']],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_by=required_by,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive
+ )
+
+ region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if module.params.get('validate_bucket_name'):
+ validate_bucket_name(module, module.params["name"])
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ endpoint_url = module.params.get('endpoint_url')
+ ceph = module.params.get('ceph')
+
+ # Look at endpoint_url and tweak connection settings
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not endpoint_url and 'S3_URL' in os.environ:
+ endpoint_url = os.environ['S3_URL']
+ module.deprecate(
+ "Support for the 'S3_URL' environment variable has been "
+ "deprecated. We recommend using the 'endpoint_url' module "
+ "parameter. Alternatively, the 'AWS_URL' environment variable can"
+ "be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ # if connecting to Ceph RGW, Walrus or fakes3
+ if endpoint_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url)
+
+ if s3_client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.')
+
+ state = module.params.get("state")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+
+ # Parameter validation
+ if encryption_key_id is not None and encryption != 'aws:kms':
+ module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+ if state == 'present':
+ create_or_update_bucket(s3_client, module, location)
+ elif state == 'absent':
+ destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
new file mode 100644
index 00000000..22d42030
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
@@ -0,0 +1,1286 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_object
+version_added: 1.0.0
+short_description: Manage objects in S3
+description:
+ - This module allows the user to manage the objects and directories within S3 buckets. Includes
+ support for creating and deleting objects and directories, retrieving objects as files or
+ strings, generating download links and copying objects that are already stored in Amazon S3.
+ - Support for creating or deleting S3 buckets with this module has been deprecated and will be
+ removed in release 6.0.0.
+ - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module.
+ - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+ - When using non-AWS services, I(endpoint_url) should be specified.
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ dest:
+ description:
+ - The destination file path when downloading an object/key when I(mode=get).
+ - Ignored when I(mode) is not C(get).
+ type: path
+ encrypt:
+ description:
+ - Asks for server-side encryption of the objects when I(mode=put) or I(mode=copy).
+ - Ignored when I(mode) is neither C(put) nor C(copy).
+ default: true
+ type: bool
+ encryption_mode:
+ description:
+ - The encryption mode to use if I(encrypt=true).
+ default: AES256
+ choices:
+ - AES256
+ - aws:kms
+ type: str
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a
+ I(mode=put) or I(mode=geturl) operation.
+ - Ignored when I(mode) is neither C(put) nor C(geturl).
+ default: 600
+ aliases: ['expiration']
+ type: int
+ headers:
+ description:
+ - Custom headers to use when I(mode=put) as a dictionary of key value pairs.
+ - Ignored when I(mode) is not C(put).
+ type: dict
+ marker:
+ description:
+ - Specifies the key to start with when using list mode. Object keys are returned in
+ alphabetical order, starting with the key immediately after the marker.
+ type: str
+ max_keys:
+ description:
+ - Max number of results to return when I(mode=list), set this if you want to retrieve fewer
+ than the default 1000 keys.
+ - Ignored when I(mode) is not C(list).
+ default: 1000
+ type: int
+ metadata:
+ description:
+ - Metadata to use when I(mode=put) or I(mode=copy) as a dictionary of key value pairs.
+ type: dict
+ mode:
+ description:
+ - Switches the module behaviour between
+ - 'C(put): upload'
+ - 'C(get): download'
+ - 'C(geturl): return download URL'
+ - 'C(getstr): download object as string'
+ - 'C(list): list keys'
+ - 'C(create): create bucket directories'
+ - 'C(delete): delete bucket directories'
+ - 'C(delobj): delete object'
+ - 'C(copy): copy object that is already stored in another bucket'
+ - Support for creating and deleting buckets has been deprecated and will
+ be removed in release 6.0.0. To create and manage the bucket itself
+ please use the M(amazon.aws.s3_bucket) module.
+ required: true
+ choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy']
+ type: str
+ object:
+ description:
+ - Keyname of the object inside the bucket.
+ - Can be used to create "virtual directories", see examples.
+ type: str
+ sig_v4:
+ description:
+ - Forces the Boto SDK to use Signature Version 4.
+ - Only applies to get modes, I(mode=get), I(mode=getstr), I(mode=geturl).
+ default: true
+ type: bool
+ version_added: 5.0.0
+ permission:
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are C(private), C(public-read), C(public-read-write),
+ C(authenticated-read) for a bucket or C(private), C(public-read), C(public-read-write),
+ C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), C(bucket-owner-full-control)
+        for an object. Multiple permissions can be specified as a list, although only the first one
+        will be used during the initial upload of the file.
+ - For a full list of permissions see the AWS documentation
+ U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl).
+ default: ['private']
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Limits the response to keys that begin with the specified prefix for list mode.
+ default: ""
+ type: str
+ version:
+ description:
+ - Version ID of the object inside the bucket. Can be used to get a specific version of a file
+ if versioning is enabled in the target bucket.
+ type: str
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key.
+ - Used when I(mode=put) or I(mode=get).
+      - Ignored when I(mode) is neither C(put) nor C(get).
+ - Must be a Boolean, C(always), C(never), C(different) or C(latest).
+ - C(true) is the same as C(always).
+ - C(false) is equal to C(never).
+ - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag'
+ of the object/key in S3. The ETag may or may not be an MD5 digest of the object data. See
+ the ETag response header here
+ U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html).
+ - When I(mode=get) and I(overwrite=latest) the last modified timestamp of local file
+ is compared with the 'LastModified' of the object/key in S3.
+ default: 'different'
+ aliases: ['force']
+ type: str
+ retries:
+ description:
+ - On recoverable failure, how many times to retry before actually failing.
+ default: 0
+ type: int
+ aliases: ['retry']
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ type: bool
+ default: false
+ ceph:
+ description:
+ - Enable API compatibility with Ceph RGW.
+ - It takes into account the S3 API subset working with Ceph in order to provide the same module
+ behaviour where possible.
+ - Requires I(endpoint_url) if I(ceph=true).
+ aliases: ['rgw']
+ default: false
+ type: bool
+ src:
+ description:
+ - The source file path when performing a C(put) operation.
+ - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put)
+ otherwise ignored.
+ type: path
+ content:
+ description:
+ - The content to C(put) into an object.
+ - The parameter value will be treated as a string and converted to UTF-8 before sending it to
+ S3.
+ - To send binary data, use the I(content_base64) parameter instead.
+ - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put)
+ otherwise ignored.
+ version_added: "1.3.0"
+ type: str
+ content_base64:
+ description:
+ - The base64-encoded binary data to C(put) into an object.
+ - Use this if you need to put raw binary data, and don't forget to encode in base64.
+ - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put)
+ otherwise ignored.
+ version_added: "1.3.0"
+ type: str
+ ignore_nonexistent_bucket:
+ description:
+ - Overrides initial bucket lookups in case bucket or IAM policies are restrictive.
+ - This can be useful when a user may have the C(GetObject) permission but no other
+ permissions. In which case using I(mode=get) will fail unless
+ I(ignore_nonexistent_bucket=true) is specified.
+ type: bool
+ default: false
+ encryption_kms_key_id:
+ description:
+      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms).
+      - Ignored if I(encryption_mode) is not C(aws:kms).
+ type: str
+ copy_src:
+ description:
+ - The source details of the object to copy.
+ - Required if I(mode=copy).
+ type: dict
+ version_added: 2.0.0
+ suboptions:
+ bucket:
+ type: str
+ description:
+ - The name of the source bucket.
+ required: true
+ object:
+ type: str
+ description:
+        - Key name of the source object.
+ required: true
+ version_id:
+ type: str
+ description:
+        - Version ID of the source object.
+ validate_bucket_name:
+ description:
+ - Whether the bucket name should be validated to conform to AWS S3 naming rules.
+ - On by default, this may be disabled for S3 backends that do not enforce these rules.
+ - See the Amazon documentation for more information about bucket naming rules
+ U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
+ type: bool
+ version_added: 3.1.0
+ default: True
+author:
+ - "Lester Wade (@lwade)"
+ - "Sloane Hertel (@s-hertel)"
+ - "Alina Buzachis (@alinabuzachis)"
+notes:
+ - Support for I(tags) and I(purge_tags) was added in release 2.0.0.
+ - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url) parameter,
+ I(s3_url) remains as an alias for I(endpoint_url).
+ - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with neither scheme nor path.
+ - Support for the C(S3_URL) environment variable has been
+ deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter
+ or the C(AWS_URL) environment variable.
+extends_documentation_fragment:
+ - amazon.aws.aws
+ - amazon.aws.ec2
+ - amazon.aws.tags
+ - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: PUT operation from a rendered template
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /object.yaml
+ content: "{{ lookup('template', 'templates/object.yaml.j2') }}"
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ ceph: true
+ endpoint_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ amazon.aws.s3_object:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ amazon.aws.s3_object:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ amazon.aws.s3_object:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ amazon.aws.s3_object:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
+
+- name: Copy an object already stored in another bucket
+ amazon.aws.s3_object:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: copy
+ copy_src:
+ bucket: srcbucket
+ object: /source/key.txt
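+
+# A minimal illustrative sketch (bucket and key names are placeholders):
+# content_base64 expects the payload to be base64-encoded first, which the
+# b64encode filter handles here.
+- name: PUT operation with base64-encoded binary content
+  amazon.aws.s3_object:
+    bucket: mybucket
+    object: /my/desired/key.bin
+    content_base64: "{{ 'Hello, world!' | b64encode }}"
+    mode: put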
+'''
+
+RETURN = '''
+msg:
+ description: Message indicating the status of the operation.
+ returned: always
+ type: str
+ sample: PUT operation complete
+url:
+ description: URL of the object.
+ returned: (for put and geturl operations)
+ type: str
+ sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
+expiry:
+ description: Number of seconds the presigned url is valid for.
+ returned: (for geturl operation)
+ type: int
+ sample: 600
+contents:
+ description: Contents of the object as string.
+ returned: (for getstr operation)
+ type: str
+ sample: "Hello, world!"
+s3_keys:
+ description: List of object keys.
+ returned: (for list operation)
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+'''
+
+import mimetypes
+import os
+import io
+from ssl import SSLError
+import base64
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.basic import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import HAS_MD5
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag_content
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name
+
+IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
+
+
+class Sigv4Required(Exception):
+ pass
+
+
+def key_check(module, s3, bucket, obj, version=None, validate=True):
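+    # Return True when the object (optionally a specific version) exists.
+    # A 403 response only fails the module when validate is True, so callers
+    # with restrictive IAM policies can still proceed via validate=False.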
+ try:
+ if version:
+ s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ s3.head_object(Bucket=bucket, Key=obj)
+ except is_boto3_error_code('404'):
+ return False
+ except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except
+ if validate is True:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+
+ return True
+
+
+def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
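+    # Compare the remote object's ETag against one calculated locally from a
+    # file or from in-memory content; True means the payloads match.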
+ s3_etag = get_etag(s3, bucket, obj, version=version)
+ if local_file is not None:
+ local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
+ else:
+ local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version)
+
+ return s3_etag == local_etag
+
+
+def get_etag(s3, bucket, obj, version=None):
+ try:
+ if version:
+ key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key_check = s3.head_object(Bucket=bucket, Key=obj)
+ if not key_check:
+ return None
+ return key_check['ETag']
+ except is_boto3_error_code('404'):
+ return None
+
+
+def get_s3_last_modified_timestamp(s3, bucket, obj, version=None):
+ if version:
+ key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key_check = s3.head_object(Bucket=bucket, Key=obj)
+ if not key_check:
+ return None
+ return key_check['LastModified'].timestamp()
+
+
+def is_local_object_latest(module, s3, bucket, obj, version=None, local_file=None):
+ s3_last_modified = get_s3_last_modified_timestamp(s3, bucket, obj, version)
+    if not os.path.exists(local_file):
+        return False
+    local_last_modified = os.path.getmtime(local_file)
+
+    return s3_last_modified <= local_last_modified
+
+
+def bucket_check(module, s3, bucket, validate=True):
+ exists = True
+ try:
+ s3.head_bucket(Bucket=bucket)
+ except is_boto3_error_code('404'):
+ return False
+ except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except
+ if validate is True:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Invalid endpoint provided")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ return exists
+
+
+def create_bucket(module, s3, bucket, location=None):
+ module.deprecate('Support for creating S3 buckets using the s3_object module'
+ ' has been deprecated. Please use the ``s3_bucket`` module'
+ ' instead.', version='6.0.0', collection_name='amazon.aws')
+ if module.check_mode:
+ module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ try:
+ if len(configuration) > 0:
+ s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
+ else:
+ s3.create_bucket(Bucket=bucket)
+ if module.params.get('permission'):
+ # Wait for the bucket to exist before setting ACLs
+ s3.get_waiter('bucket_exists').wait(Bucket=bucket)
+ for acl in module.params.get('permission'):
+ AWSRetry.jittered_backoff(
+ max_delay=120, catch_extra_error_codes=['NoSuchBucket']
+ )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+
+ if bucket:
+ return True
+
+
+def paginated_list(s3, **pagination_params):
+ pg = s3.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versioned_list_with_fallback(s3, **pagination_params):
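+    # Yield pages of {'Key', 'VersionId'} dicts (including delete markers) via
+    # list_object_versions; fall back to a plain key listing when the versions
+    # API is unimplemented by the backend or access is denied.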
+ try:
+ versioned_pg = s3.get_paginator('list_object_versions')
+ for page in versioned_pg.paginate(**pagination_params):
+ delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
+ current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
+ yield delete_markers + current_objects
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']):
+ for page in paginated_list(s3, **pagination_params):
+ yield [{'Key': data['Key']} for data in page]
+
+
+def list_keys(module, s3, bucket, prefix, marker, max_keys):
+ pagination_params = {'Bucket': bucket}
+ for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
+ pagination_params[param_name] = param_value
+ try:
+ keys = sum(paginated_list(s3, **pagination_params), [])
+ module.exit_json(msg="LIST operation complete", s3_keys=keys)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
+
+
+def delete_bucket(module, s3, bucket):
+ module.deprecate('Support for deleting S3 buckets using the s3_object module'
+ ' has been deprecated. Please use the ``s3_bucket`` module'
+ ' instead.', version='6.0.0', collection_name='amazon.aws')
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ exists = bucket_check(module, s3, bucket)
+ if exists is False:
+ return False
+ # if there are contents then we need to delete them before we can delete the bucket
+ for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
+ if keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
+ s3.delete_bucket(Bucket=bucket)
+ return True
+ except is_boto3_error_code('NoSuchBucket'):
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
+
+
+def delete_key(module, s3, bucket, obj):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ s3.delete_object(Bucket=bucket, Key=obj)
+ module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
+
+
+def create_dirkey(module, s3, bucket, obj, encrypt, expiry):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
+ if encrypt:
+ params['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+
+ s3.put_object(**params)
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+
+ # Tags
+ tags, _changed = ensure_tags(s3, module, bucket, obj)
+
+    # put_download_url wraps generate_presigned_url with its own error handling.
+    url = put_download_url(module, s3, bucket, obj, expiry)
+
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), url=url, tags=tags, changed=True)
+
+
+def path_check(path):
+    return os.path.exists(path)
+
+
+def option_in_extra_args(option):
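+    # Normalise the supplied option name (case- and dash-insensitive) and map
+    # it onto the matching boto3 ExtraArgs key; returns None for plain
+    # metadata entries that have no ExtraArgs equivalent.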
+ temp_option = option.replace('-', '').lower()
+
+ allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
+ 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
+ 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
+ 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
+ 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
+ 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
+ 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
+
+ if temp_option in allowed_extra_args:
+ return allowed_extra_args[temp_option]
+
+
+def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None, acl_disabled=False):
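+    # Upload a local file (src) or in-memory bytes (content), applying
+    # server-side encryption, metadata/ExtraArgs, ACLs and tags, then exit
+    # with a presigned URL for the uploaded key.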
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ extra = {}
+ if encrypt:
+ extra['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+ if metadata:
+ extra['Metadata'] = {}
+
+ # determine object metadata and extra arguments
+ for option in metadata:
+ extra_args_option = option_in_extra_args(option)
+ if extra_args_option is not None:
+ extra[extra_args_option] = metadata[option]
+ else:
+ extra['Metadata'][option] = metadata[option]
+
+ if module.params.get('permission'):
+ permissions = module.params['permission']
+ if isinstance(permissions, str):
+ extra['ACL'] = permissions
+ elif isinstance(permissions, list):
+ extra['ACL'] = permissions[0]
+
+ if 'ContentType' not in extra:
+ content_type = None
+ if src is not None:
+ content_type = mimetypes.guess_type(src)[0]
+ if content_type is None:
+ # s3 default content type
+ content_type = 'binary/octet-stream'
+ extra['ContentType'] = content_type
+
+ if src is not None:
+ s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ else:
+ f = io.BytesIO(content)
+ s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+ if not acl_disabled:
+ try:
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+
+ # Tags
+ tags, _changed = ensure_tags(s3, module, bucket, obj)
+
+ url = put_download_url(module, s3, bucket, obj, expiry)
+
+ module.exit_json(msg="PUT operation complete", url=url, tags=tags, changed=True)
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+    # 'retries' is the number of retries after the initial attempt, so the
+    # download loop below runs retries + 1 times in total.
+ try:
+        # Note: something of a permissions-related hack.
+        # get_object returns the HEAD information plus a *stream* which can be read.
+        # Because the stream is dropped on the floor, we never pull the data; this is
+        # the functional equivalent of a HEAD call while relying only on the 'GET' permission.
+ if version:
+ s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ s3.get_object(Bucket=bucket, Key=obj)
+ except is_boto3_error_code(['404', '403']) as e:
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except is_boto3_error_message('require AWS Signature Version 4'): # pylint: disable=duplicate-except
+ raise Sigv4Required()
+ except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+ optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+ for x in range(0, retries + 1):
+ try:
+ s3.download_file(bucket, obj, dest, **optional_kwargs)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+ # otherwise, try again, this may be a transient timeout.
+ except SSLError as e: # will ClientError catch SSLError?
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="s3 download failed")
+ # otherwise, try again, this may be a transient timeout.
+
+
+def download_s3str(module, s3, bucket, obj, version=None, validate=True):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ try:
+ if version:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
+ else:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except is_boto3_error_message('require AWS Signature Version 4'):
+ raise Sigv4Required()
+ except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+
+
+def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True):
+ try:
+ url = s3.generate_presigned_url(ClientMethod='get_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ module.exit_json(msg="Download url:", url=url, tags=tags, expiry=expiry, changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while getting download url.")
+
+
+def put_download_url(module, s3, bucket, obj, expiry):
+ try:
+ url = s3.generate_presigned_url(ClientMethod='put_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to generate presigned URL")
+ return url
+
+
+def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag):
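+    # Server-side copy within S3. Exits without changes when the source key
+    # is missing, or reconciles tags only when the source and destination
+    # ETags already match.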
+ if module.check_mode:
+ module.exit_json(msg="COPY operation skipped - running in check mode", changed=True)
+ try:
+ params = {'Bucket': bucket, 'Key': obj}
+ bucketsrc = {'Bucket': module.params['copy_src'].get('bucket'), 'Key': module.params['copy_src'].get('object')}
+ version = None
+ if module.params['copy_src'].get('version_id') is not None:
+ version = module.params['copy_src'].get('version_id')
+ bucketsrc.update({'VersionId': version})
+ if not key_check(module, s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version, validate=validate):
+ # Key does not exist in source bucket
+ module.exit_json(msg="Key %s does not exist in bucket %s." % (bucketsrc['Key'], bucketsrc['Bucket']), changed=False)
+
+ s_etag = get_etag(s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version)
+ if s_etag == d_etag:
+ # Tags
+ tags, changed = ensure_tags(s3, module, bucket, obj)
+ if not changed:
+ module.exit_json(msg="ETag from source and destination are the same", changed=False)
+ else:
+ module.exit_json(msg="tags successfully updated.", changed=changed, tags=tags)
+ else:
+ params.update({'CopySource': bucketsrc})
+ if encrypt:
+ params['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+ if metadata:
+ params['Metadata'] = {}
+ # determine object metadata and extra arguments
+ for option in metadata:
+ extra_args_option = option_in_extra_args(option)
+ if extra_args_option is not None:
+ params[extra_args_option] = metadata[option]
+ else:
+ params['Metadata'][option] = metadata[option]
+ s3.copy_object(**params)
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ # Tags
+ tags, changed = ensure_tags(s3, module, bucket, obj)
+ module.exit_json(msg="Object copied from bucket %s to bucket %s." % (bucketsrc['Bucket'], bucket), tags=tags, changed=True)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while copying object %s from bucket %s." % (obj, module.params['copy_src'].get('Bucket')))
+
+
+def is_fakes3(endpoint_url):
+ """ Return True if endpoint_url has scheme fakes3:// """
+ if endpoint_url is not None:
+ return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
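+    # Build the boto3 S3 client, adjusting endpoint, SSL and signature
+    # settings for Ceph RGW, fakes3, KMS uploads, SigV4 downloads and
+    # dual-stack endpoints.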
+ if ceph: # TODO - test this
+ ceph = urlparse(endpoint_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+ region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ elif is_fakes3(endpoint_url):
+ fakes3 = urlparse(endpoint_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ elif module.params['mode'] in ('get', 'getstr', 'geturl') and sig_4:
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ if module.params['dualstack']:
+ dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+ if 'config' in params:
+ params['config'] = params['config'].merge(dualconf)
+ else:
+ params['config'] = dualconf
+ return boto3_conn(**params)
+
+
+def get_current_object_tags_dict(s3, bucket, obj, version=None):
+ try:
+ if version:
+ current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj, VersionId=version).get('TagSet')
+ else:
+ current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj).get('TagSet')
+ except is_boto3_error_code('NoSuchTagSet'):
+ return {}
+ except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except
+ return {}
+
+ return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_object_tagging(s3, bucket, obj, tags):
+ s3.put_object_tagging(Bucket=bucket, Key=obj, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_object_tagging(s3, bucket, obj):
+ s3.delete_object_tagging(Bucket=bucket, Key=obj)
+
+
+def wait_tags_are_applied(module, s3, bucket, obj, expected_tags_dict, version=None):
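+    # Object tagging is eventually consistent, so poll up to 12 times at
+    # 5-second intervals (roughly one minute) for the tags to become visible.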
+ for dummy in range(0, 12):
+ try:
+ current_tags_dict = get_current_object_tags_dict(s3, bucket, obj, version)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get object tags.")
+ if current_tags_dict != expected_tags_dict:
+ time.sleep(5)
+ else:
+ return current_tags_dict
+
+ module.fail_json(msg="Object tags failed to apply in the expected time.",
+ requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def ensure_tags(client, module, bucket, obj):
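+    # Reconcile the requested tags with the object's current tags, honouring
+    # purge_tags; returns (current_tags_dict, changed).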
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ changed = False
+
+ try:
+ current_tags_dict = get_current_object_tags_dict(client, bucket, obj)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("GetObjectTagging is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning.")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to get object tags.")
+ else:
+ if tags is not None:
+ if not purge_tags:
+ # Ensure existing tags that aren't updated by desired tags remain
+ current_copy = current_tags_dict.copy()
+ current_copy.update(tags)
+ tags = current_copy
+ if current_tags_dict != tags:
+ if tags:
+ try:
+ put_object_tagging(client, bucket, obj, tags)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update object tags.")
+ else:
+ if purge_tags:
+ try:
+ delete_object_tagging(client, bucket, obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete object tags.")
+ current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags)
+ changed = True
+ return current_tags_dict, changed
+
+
+def main():
+ # Beware: this module uses an action plugin (plugins/action/s3_object.py)
+ # so that src parameter can be either in 'files/' lookup path on the
+ # controller, *or* on the remote host that the task is executed on.
+
+ argument_spec = dict(
+ bucket=dict(required=True),
+ dest=dict(default=None, type='path'),
+ encrypt=dict(default=True, type='bool'),
+ encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
+ expiry=dict(default=600, type='int', aliases=['expiration']),
+ headers=dict(type='dict'),
+ marker=dict(default=""),
+ max_keys=dict(default=1000, type='int', no_log=False),
+ metadata=dict(type='dict'),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'], required=True),
+ sig_v4=dict(default=True, type='bool'),
+ object=dict(),
+ permission=dict(type='list', elements='str', default=['private']),
+ version=dict(default=None),
+ overwrite=dict(aliases=['force'], default='different'),
+ prefix=dict(default=""),
+ retries=dict(aliases=['retry'], type='int', default=0),
+ dualstack=dict(default=False, type='bool'),
+ ceph=dict(default=False, type='bool', aliases=['rgw']),
+ src=dict(type='path'),
+ content=dict(),
+ content_base64=dict(),
+ ignore_nonexistent_bucket=dict(default=False, type='bool'),
+ encryption_kms_key_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ purge_tags=dict(type='bool', default=True),
+ copy_src=dict(type='dict', options=dict(bucket=dict(required=True), object=dict(required=True), version_id=dict())),
+ validate_bucket_name=dict(type='bool', default=True),
+ )
+
+ required_if = [
+ ['ceph', True, ['endpoint_url']],
+ ['mode', 'put', ['object']],
+ ['mode', 'get', ['dest', 'object']],
+ ['mode', 'getstr', ['object']],
+ ['mode', 'geturl', ['object']],
+ ['mode', 'copy', ['copy_src']],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ mutually_exclusive=[['content', 'content_base64', 'src']],
+ )
+
+ bucket = module.params.get('bucket')
+ encrypt = module.params.get('encrypt')
+ expiry = module.params.get('expiry')
+ dest = module.params.get('dest', '')
+ headers = module.params.get('headers')
+ marker = module.params.get('marker')
+ max_keys = module.params.get('max_keys')
+ metadata = module.params.get('metadata')
+ mode = module.params.get('mode')
+ obj = module.params.get('object')
+ version = module.params.get('version')
+ overwrite = module.params.get('overwrite')
+ sig_v4 = module.params.get('sig_v4')
+ prefix = module.params.get('prefix')
+ retries = module.params.get('retries')
+ endpoint_url = module.params.get('endpoint_url')
+ dualstack = module.params.get('dualstack')
+ ceph = module.params.get('ceph')
+ src = module.params.get('src')
+ content = module.params.get('content')
+ content_base64 = module.params.get('content_base64')
+ ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
+
+ object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
+ bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
+
+ if module.params.get('validate_bucket_name'):
+ validate_bucket_name(module, bucket)
+
+ if overwrite not in ['always', 'never', 'different', 'latest']:
+ if module.boolean(overwrite):
+ overwrite = 'always'
+ else:
+ overwrite = 'never'
+
+ if overwrite == 'different' and not HAS_MD5:
+ module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
+
+ region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ if module.params.get('object'):
+ obj = module.params['object']
+ # If there is a top level object, do nothing - if the object starts with /
+ # remove the leading character to maintain compatibility with Ansible versions < 2.4
+ if obj.startswith('/'):
+ obj = obj[1:]
+
+ # Bucket deletion does not require obj. Prevents ambiguity with delobj.
+ if obj and mode == "delete":
+ module.fail_json(msg='Parameter obj cannot be used with mode=delete')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not endpoint_url and 'S3_URL' in os.environ:
+ endpoint_url = os.environ['S3_URL']
+ module.deprecate(
+ "Support for the 'S3_URL' environment variable has been "
+ "deprecated. We recommend using the 'endpoint_url' module "
+ "parameter. Alternatively, the 'AWS_URL' environment variable can "
+ "be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url:
+ module.fail_json(msg='dualstack only applies to AWS S3')
+
+ # Look at endpoint_url and tweak connection settings
+ # if connecting to RGW, Walrus or fakes3
+ if endpoint_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_v4)
+
+ validate = not ignore_nonexistent_bucket
+
+ # check if bucket exists, if yes, check if ACL is disabled
+ acl_disabled = False
+ exists = bucket_check(module, s3, bucket)
+ if exists:
+ try:
+ ownership_controls = s3.get_bucket_ownership_controls(Bucket=bucket)['OwnershipControls']
+ if ownership_controls.get('Rules'):
+ object_ownership = ownership_controls['Rules'][0]['ObjectOwnership']
+ if object_ownership == 'BucketOwnerEnforced':
+ acl_disabled = True
+ # if bucket ownership controls are not found
+ except botocore.exceptions.ClientError:
+ pass
+
+ # separate types of ACLs
+ if not acl_disabled:
+ bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
+ object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
+ error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
+ if error_acl:
+ module.fail_json(msg='Unknown permission specified: %s' % error_acl)
+
+    # Check whether the bucket exists; validation is skipped when ignore_nonexistent_bucket is set.
+ bucketrtn = bucket_check(module, s3, bucket, validate=validate)
+
+ if validate and mode not in ('create', 'put', 'delete', 'copy') and not bucketrtn:
+ module.fail_json(msg="Source bucket cannot be found.")
+
+ if mode == 'get':
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn is False:
+ if version:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if dest and path_check(dest) and overwrite != 'always':
+ if overwrite == 'never':
+ module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
+ if overwrite == 'different' and etag_compare(module, s3, bucket, obj, version=version, local_file=dest):
+ module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+ if overwrite == 'latest' and is_local_object_latest(module, s3, bucket, obj, version=version, local_file=dest):
+ module.exit_json(msg="Local object is latest, ignoreing. Use overwrite=always parameter to force.", changed=False)
+
+ try:
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+ if mode == 'put':
+
+ # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+ # these were separated into the variables bucket_acl and object_acl above
+
+ if content is None and content_base64 is None and src is None:
+ module.fail_json(msg='Either content, content_base64 or src must be specified for PUT operations')
+ if src is not None and not path_check(src):
+ module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (src))
+
+ keyrtn = None
+ if bucketrtn:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ else:
+ # If the bucket doesn't exist we should create it.
+ # only use valid bucket acls for create_bucket function
+ module.params['permission'] = bucket_acl
+ create_bucket(module, s3, bucket, location)
+
+ # the content will be uploaded as a byte string, so we must encode it first
+ bincontent = None
+ if content is not None:
+ bincontent = content.encode('utf-8')
+ if content_base64 is not None:
+ bincontent = base64.standard_b64decode(content_base64)
+
+ if keyrtn and overwrite != 'always':
+ if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent):
+ # Return the download URL for the existing object and ensure tags are updated
+ tags, tags_update = ensure_tags(s3, module, bucket, obj)
+ get_download_url(module, s3, bucket, obj, expiry, tags, changed=tags_update)
+
+ # only use valid object acls for the upload_s3file function
+ if not acl_disabled:
+ module.params['permission'] = object_acl
+ upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent, acl_disabled=acl_disabled)
+
+ # Delete an object from a bucket, not the entire bucket
+ if mode == 'delobj':
+ if obj is None:
+ module.fail_json(msg="object parameter is required")
+ if bucket:
+ deletertn = delete_key(module, s3, bucket, obj)
+ if deletertn is True:
+ module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Delete an entire bucket, including all objects in the bucket
+ if mode == 'delete':
+ if bucket:
+ deletertn = delete_bucket(module, s3, bucket)
+ if deletertn is True:
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Support for listing a set of keys
+ if mode == 'list':
+
+ # If the bucket does not exist then bail out
+ if not bucketrtn:
+ module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+ list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+ # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+ # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
+ if mode == 'create':
+
+ # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+ # these were separated above into the variables bucket_acl and object_acl
+
+ if bucket and not obj:
+ if bucketrtn:
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ # only use valid bucket acls when creating the bucket
+ module.params['permission'] = bucket_acl
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+ if bucketrtn:
+ if key_check(module, s3, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ # setting valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt, expiry)
+ else:
+ # only use valid bucket acls for the create_bucket function
+ module.params['permission'] = bucket_acl
+ create_bucket(module, s3, bucket, location)
+ # only use valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt, expiry)
+
+ # Support for grabbing the time-expired URL for an object in S3/Walrus.
+ if mode == 'geturl':
+ if not bucket and not obj:
+ module.fail_json(msg="Bucket and Object parameters must be set")
+
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ tags = get_current_object_tags_dict(s3, bucket, obj, version=version)
+ get_download_url(module, s3, bucket, obj, expiry, tags)
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if mode == 'getstr':
+ if bucket and obj:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ try:
+ download_s3str(module, s3, bucket, obj, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
+ download_s3str(module, s3, bucket, obj, version=version)
+ elif version is not None:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if mode == 'copy':
+ # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+ # these were separated into the variables bucket_acl and object_acl above
+ d_etag = None
+ if bucketrtn:
+ d_etag = get_etag(s3, bucket, obj)
+ else:
+ # If the bucket doesn't exist we should create it.
+ # only use valid bucket acls for create_bucket function
+ module.params['permission'] = bucket_acl
+ create_bucket(module, s3, bucket, location)
+ # only use valid object acls for the copy operation
+ module.params['permission'] = object_acl
+ copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag)
+
+ module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
new file mode 100644
index 00000000..88e66dc4
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
@@ -0,0 +1,818 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_object_info
+version_added: 5.0.0
+short_description: Gather information about objects in S3
+description:
+ - Describes objects in S3.
+ - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID (only supports list_keys currently).
+ - When using non-AWS services, I(endpoint_url) should be specified.
+author:
+ - Mandar Vijay Kulkarni (@mandar242)
+options:
+ bucket_name:
+ description:
+ - The name of the bucket that contains the object.
+ required: true
+ type: str
+ object_name:
+ description:
+ - The name of the object.
+ - If not specified, a list of all objects in the specified bucket will be returned.
+ required: false
+ type: str
+ endpoint_url:
+ description:
+      - S3 endpoint URL for use with Ceph, Eucalyptus, fakes3, etc. Otherwise AWS is assumed.
+ type: str
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ type: bool
+ default: false
+ ceph:
+ description:
+ - Enable API compatibility with Ceph RGW.
+ - It takes into account the S3 API subset working with Ceph in order to provide the same module
+ behaviour where possible.
+ - Requires I(endpoint_url) if I(ceph=true).
+ aliases: ['rgw']
+ default: false
+ type: bool
+ object_details:
+ description:
+ - Retrieve requested S3 object detailed information.
+ required: false
+ type: dict
+ suboptions:
+ object_acl:
+ description:
+        - Retrieve S3 object ACL.
+ required: false
+ type: bool
+ default: false
+ object_legal_hold:
+ description:
+        - Retrieve S3 object legal_hold.
+ required: false
+ type: bool
+ default: false
+ object_lock_configuration:
+ description:
+        - Retrieve S3 object lock_configuration.
+ required: false
+ type: bool
+ default: false
+ object_retention:
+ description:
+        - Retrieve S3 object retention.
+ required: false
+ type: bool
+ default: false
+ object_tagging:
+ description:
+        - Retrieve S3 object tags.
+ required: false
+ type: bool
+ default: false
+ object_attributes:
+ description:
+        - Retrieve S3 object attributes.
+ - Requires minimum botocore version 1.24.7.
+ required: false
+ type: bool
+ default: false
+ attributes_list:
+ description:
+ - The fields/details that should be returned.
+ - Required when I(object_attributes) is C(true) in I(object_details).
+ type: list
+ elements: str
+ choices: ['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize']
+notes:
+ - Support for the C(S3_URL) environment variable has been
+ deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter
+ or the C(AWS_URL) environment variable.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Retrieve a list of objects in S3 bucket
+ amazon.aws.s3_object_info:
+ bucket_name: MyTestBucket
+
+- name: Retrieve a list of objects in Ceph RGW S3
+ amazon.aws.s3_object_info:
+ bucket_name: MyTestBucket
+ ceph: true
+ endpoint_url: "http://localhost:8000"
+
+- name: Retrieve object metadata without object itself
+ amazon.aws.s3_object_info:
+ bucket_name: MyTestBucket
+ object_name: MyTestObjectKey
+
+- name: Retrieve detailed S3 information for all objects in the bucket
+ amazon.aws.s3_object_info:
+ bucket_name: MyTestBucket
+ object_details:
+ object_acl: true
+ object_attributes: true
+ attributes_list:
+ - ETag
+ - ObjectSize
+ - StorageClass
+
+- name: Retrieve detailed S3 object information
+ amazon.aws.s3_object_info:
+ bucket_name: MyTestBucket
+ object_name: MyTestObjectKey
+ object_details:
+ object_acl: true
+ object_tagging: true
+ object_legal_hold: true
+ object_attributes: true
+ attributes_list:
+ - ETag
+ - ObjectSize
+
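+# An illustrative sketch (bucket and key names are placeholders): fetch the
+# object lock configuration and retention settings documented above.
+- name: Retrieve S3 object lock and retention details
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+    object_name: MyTestObjectKey
+    object_details:
+      object_lock_configuration: true
+      object_retention: true
+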
+'''
+
+RETURN = r'''
+s3_keys:
+ description: List of object keys.
+  returned: when only I(bucket_name) is specified and neither I(object_name) nor I(object_details) is specified.
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+object_info:
+ description: S3 object details.
+ returned: when I(bucket_name) and I(object_name) are specified.
+ type: list
+ elements: dict
+ contains:
+ object_data:
+ description: A dict containing the metadata of S3 object.
+ returned: when I(bucket_name) and I(object_name) are specified but I(object_details) is not specified.
+ type: dict
+ elements: str
+ contains:
+ accept_ranges:
+ description: Indicates that a range of bytes was specified.
+ returned: always
+ type: str
+ content_length:
+ description: Size of the body (object data) in bytes.
+ returned: always
+ type: int
+ content_type:
+ description: A standard MIME type describing the format of the object data.
+ returned: always
+ type: str
+ e_tag:
+          description: An opaque identifier assigned by a web server to a specific version of a resource found at a URL.
+ returned: always
+ type: str
+ last_modified:
+ description: Creation date of the object.
+ returned: always
+ type: str
+ metadata:
+ description: A map of metadata to store with the object in S3.
+ returned: always
+ type: dict
+ server_side_encryption:
+ description: The server-side encryption algorithm used when storing this object in Amazon S3.
+ returned: always
+ type: str
+ tag_count:
+ description: The number of tags, if any, on the object.
+ returned: always
+ type: int
+ object_acl:
+ description: Access control list (ACL) of an object.
+      returned: when I(object_acl) is set to C(true).
+ type: complex
+ contains:
+ owner:
+ description: Bucket owner's display ID and name.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: Bucket owner's ID.
+ returned: always
+ type: str
+ sample: "xxxxxxxxxxxxxxxxxxxxx"
+ display_name:
+ description: Bucket owner's display name.
+ returned: always
+ type: str
+ sample: 'abcd'
+ grants:
+ description: A list of grants.
+ returned: always
+ type: complex
+ contains:
+ grantee:
+ description: The entity being granted permissions.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: The canonical user ID of the grantee.
+ returned: always
+ type: str
+ sample: "xxxxxxxxxxxxxxxxxxx"
+ type:
+ description: type of grantee.
+ returned: always
+ type: str
+ sample: "CanonicalUser"
+ permission:
+ description: Specifies the permission given to the grantee.
+ returned: always
+ type: str
+ sample: "FULL CONTROL"
+ object_legal_hold:
+      description: Object's current legal hold status.
+      returned: when I(object_legal_hold) is set to C(true) and object legal hold is set on the bucket.
+ type: complex
+ contains:
+ legal_hold:
+ description: The current legal hold status for the specified object.
+ returned: always
+ type: complex
+ contains:
+ status:
+ description: Indicates whether the specified object has a legal hold in place.
+ returned: always
+ type: str
+ sample: "ON"
+ object_lock_configuration:
+ description: Object Lock configuration for a bucket.
+      returned: when I(object_lock_configuration) is set to C(true) and object lock configuration is set on the bucket.
+ type: complex
+ contains:
+ object_lock_enabled:
+ description: Indicates whether this bucket has an Object Lock configuration enabled.
+ returned: always
+ type: str
+ rule:
+ description: Specifies the Object Lock rule for the specified object.
+ returned: always
+ type: complex
+ contains:
+ default_retention:
+ description: The default Object Lock retention mode and period that you want to apply to new objects placed in the specified bucket.
+ returned: always
+ type: complex
+ contains:
+ mode:
+ description:
+ - The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.
+ - Must be used with either Days or Years.
+ returned: always
+ type: str
+ days:
+ description: The number of days that you want to specify for the default retention period.
+ returned: always
+ type: int
+ years:
+ description: The number of years that you want to specify for the default retention period.
+ returned: always
+ type: int
+ object_retention:
+ description: Object's retention settings.
+      returned: when I(object_retention) is set to C(true) and object retention is set on the bucket.
+ type: complex
+ contains:
+ retention:
+ description: The container element for an object's retention settings.
+ returned: always
+ type: complex
+ contains:
+ mode:
+ description: Indicates the Retention mode for the specified object.
+ returned: always
+ type: str
+ retain_until_date:
+ description: The date on which this Object Lock Retention will expire.
+ returned: always
+ type: str
+ object_tagging:
+ description: The tag-set of an object.
+ returned: when I(object_tagging) is set to C(true).
+ type: dict
+ object_attributes:
+ description: Object attributes.
+ returned: when I(object_attributes) is set to C(true).
+ type: complex
+ contains:
+ etag:
+ description: An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
+ returned: always
+ type: str
+ sample: "8fa34xxxxxxxxxxxxxxxxxxxxx35c6f3b"
+ last_modified:
+ description: The creation date of the object.
+ returned: always
+ type: str
+ sample: "2022-08-10T01:11:03+00:00"
+ object_size:
+ description: The size of the object in bytes.
+ returned: always
+ type: int
+ sample: 819
+ checksum:
+ description: The checksum or digest of the object.
+ returned: always
+ type: complex
+ contains:
+ checksum_crc32:
+ description: The base64-encoded, 32-bit CRC32 checksum of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_crc32c:
+ description: The base64-encoded, 32-bit CRC32C checksum of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_sha1:
+ description: The base64-encoded, 160-bit SHA-1 digest of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_sha256:
+ description: The base64-encoded, 256-bit SHA-256 digest of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ object_parts:
+ description: A collection of parts associated with a multipart upload.
+ returned: always
+ type: complex
+ contains:
+ total_parts_count:
+ description: The total number of parts.
+ returned: always
+ type: int
+ part_number_marker:
+ description: The marker for the current part.
+ returned: always
+ type: int
+ next_part_number_marker:
+ description: When a list is truncated, this element specifies the last part in the list, as well as the value to use for the PartNumberMarker request parameter in a subsequent request.
+ returned: always
+ type: int
+ max_parts:
+ description: The maximum number of parts allowed in the response.
+ returned: always
+ type: int
+ is_truncated:
+ description: Indicates whether the returned list of parts is truncated.
+ returned: always
+ type: bool
+ storage_class:
+ description: The storage class information of the object.
+ returned: always
+ type: str
+ sample: "STANDARD"
+ parts:
+ description: A container for elements related to an individual part.
+ returned: always
+ type: complex
+ contains:
+ part_number:
+ description: The part number identifying the part. This value is a positive integer between 1 and 10,000.
+ returned: always
+ type: int
+ size:
+ description: The size of the uploaded part in bytes.
+ returned: always
+ type: int
+ checksum_crc32:
+ description: The base64-encoded, 32-bit CRC32 checksum of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_crc32c:
+ description: The base64-encoded, 32-bit CRC32C checksum of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_sha1:
+ description: The base64-encoded, 160-bit SHA-1 digest of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+ checksum_sha256:
+ description: The base64-encoded, 256-bit SHA-256 digest of the object.
+ returned: if it was uploaded with the object.
+ type: str
+ sample: "xxxxxxxxxxxx"
+'''
+
+import os
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+def describe_s3_object_acl(connection, bucket_name, object_name):
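+ """Return the snake_cased GetObjectAcl response for the object, or an empty dict if the call fails."""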
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+
+ object_acl_info = {}
+
+ try:
+ object_acl_info = connection.get_object_acl(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_acl_info) != 0:
+ # Remove ResponseMetadata from object_acl_info, convert to snake_case
+ del object_acl_info['ResponseMetadata']
+ object_acl_info = camel_dict_to_snake_dict(object_acl_info)
+
+ return object_acl_info
+
+
+def describe_s3_object_attributes(connection, module, bucket_name, object_name):
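+ """Return the snake_cased GetObjectAttributes response for the requested attributes_list, or a dict containing only a 'msg' key on failure."""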
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+ params['ObjectAttributes'] = module.params.get('object_details')['attributes_list']
+
+ object_attributes_info = {}
+
+ try:
+ object_attributes_info = connection.get_object_attributes(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ object_attributes_info['msg'] = 'Object attributes not found'
+
+ if len(object_attributes_info) != 0 and 'msg' not in object_attributes_info.keys():
+ # Remove ResponseMetadata from object_attributes_info, convert to snake_case
+ del object_attributes_info['ResponseMetadata']
+ object_attributes_info = camel_dict_to_snake_dict(object_attributes_info)
+
+ return object_attributes_info
+
+
+def describe_s3_object_legal_hold(connection, bucket_name, object_name):
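+ """Return the snake_cased GetObjectLegalHold response for the object, or an empty dict if the call fails (for example, when no legal hold is set)."""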
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+
+ object_legal_hold_info = {}
+
+ try:
+ object_legal_hold_info = connection.get_object_legal_hold(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_legal_hold_info) != 0:
+ # Remove ResponseMetadata from object_legal_hold_info, convert to snake_case
+ del object_legal_hold_info['ResponseMetadata']
+ object_legal_hold_info = camel_dict_to_snake_dict(object_legal_hold_info)
+
+ return object_legal_hold_info
+
+
+def describe_s3_object_lock_configuration(connection, bucket_name):
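+ """Return the snake_cased GetObjectLockConfiguration response for the bucket, or an empty dict if the call fails."""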
+ params = {}
+ params['Bucket'] = bucket_name
+
+ object_legal_lock_configuration_info = {}
+
+ try:
+ object_legal_lock_configuration_info = connection.get_object_lock_configuration(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_legal_lock_configuration_info) != 0:
+ # Remove ResponseMetadata from object_legal_lock_configuration_info, convert to snake_case
+ del object_legal_lock_configuration_info['ResponseMetadata']
+ object_legal_lock_configuration_info = camel_dict_to_snake_dict(object_legal_lock_configuration_info)
+
+ return object_legal_lock_configuration_info
+
+
+def describe_s3_object_retention(connection, bucket_name, object_name):
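+ """Return the snake_cased GetObjectRetention response for the object, or an empty dict if the call fails (for example, when no retention is set)."""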
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+
+ object_retention_info = {}
+
+ try:
+ object_retention_info = connection.get_object_retention(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_retention_info) != 0:
+ # Remove ResponseMetadata from object_retention_info, convert to snake_case
+ del object_retention_info['ResponseMetadata']
+ object_retention_info = camel_dict_to_snake_dict(object_retention_info)
+
+ return object_retention_info
+
+
+def describe_s3_object_tagging(connection, bucket_name, object_name):
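+ """Return the object's tag-set from GetObjectTagging as a plain key/value dict, or an empty dict if the call fails."""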
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+
+ object_tagging_info = {}
+
+ try:
+ object_tagging_info = connection.get_object_tagging(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_tagging_info) != 0:
+ # Remove ResponseMetadata from object_tagging_info, convert to snake_case
+ del object_tagging_info['ResponseMetadata']
+ object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info['TagSet'])
+
+ return object_tagging_info
+
+
+def get_object_details(connection, module, bucket_name, object_name, requested_facts):
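+ """Gather HeadObject data plus each fact flagged C(true) in requested_facts for a single object."""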
+
+ all_facts = {}
+
+ # Remove non-requested facts
+ requested_facts = {fact: value for fact, value in requested_facts.items() if value is True}
+
+ all_facts['object_data'] = get_object(connection, bucket_name, object_name)['object_data']
+
+ # The APIs called below do not return object_name, so add it to the result manually
+ all_facts['object_name'] = object_name
+
+ for key in requested_facts:
+ if key == 'object_acl':
+ all_facts[key] = describe_s3_object_acl(connection, bucket_name, object_name)
+ elif key == 'object_attributes':
+ all_facts[key] = describe_s3_object_attributes(connection, module, bucket_name, object_name)
+ elif key == 'object_legal_hold':
+ all_facts[key] = describe_s3_object_legal_hold(connection, bucket_name, object_name)
+ elif key == 'object_lock_configuration':
+ all_facts[key] = describe_s3_object_lock_configuration(connection, bucket_name)
+ elif key == 'object_retention':
+ all_facts[key] = describe_s3_object_retention(connection, bucket_name, object_name)
+ elif key == 'object_tagging':
+ all_facts[key] = describe_s3_object_tagging(connection, bucket_name, object_name)
+
+ return all_facts
+
+
+def get_object(connection, bucket_name, object_name):
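+ """Return a dict with 'object_data' set to the snake_cased HeadObject response (empty dict on failure)."""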
+ params = {}
+ params['Bucket'] = bucket_name
+ params['Key'] = object_name
+
+ result = {}
+ object_info = {}
+
+ try:
+ object_info = connection.head_object(**params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ pass
+
+ if len(object_info) != 0:
+ # Remove ResponseMetadata from object_info, convert to snake_case
+ del object_info['ResponseMetadata']
+ object_info = camel_dict_to_snake_dict(object_info)
+
+ result['object_data'] = object_info
+
+ return result
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _list_bucket_objects(connection, **params):
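+ """Paginate ListObjects and return the full aggregated result (retried with jittered backoff)."""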
+ paginator = connection.get_paginator('list_objects')
+ return paginator.paginate(**params).build_full_result()
+
+
+def list_bucket_objects(connection, module, bucket_name):
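+ """Return a list of all object keys in the bucket, failing the module on client errors."""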
+ params = {}
+ params['Bucket'] = bucket_name
+
+ result = []
+ list_objects_response = {}
+
+ try:
+ list_objects_response = _list_bucket_objects(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to list bucket objects.')
+
+ # Extract the object keys from the paginated response; 'Contents' is absent when the bucket is empty
+ for response_list_item in list_objects_response.get('Contents', []):
+ result.append(response_list_item['Key'])
+
+ return result
+
+
+def bucket_check(connection, module, bucket_name):
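+ """Fail the module if the bucket does not exist or access is denied (404/403)."""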
+ try:
+ connection.head_bucket(Bucket=bucket_name)
+ except is_boto3_error_code(['404', '403']) as e:
+ module.fail_json_aws(e, msg="The bucket %s does not exist or is missing access permissions." % bucket_name)
+
+
+def object_check(connection, module, bucket_name, object_name):
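+ """Fail the module if the object does not exist or access is denied (404/403)."""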
+ try:
+ connection.head_object(Bucket=bucket_name, Key=object_name)
+ except is_boto3_error_code(['404', '403']) as e:
+ module.fail_json_aws(e, msg="The object %s does not exist or is missing access permissions." % object_name)
+
+
+# Helpers for building the S3 connection when dealing with Ceph/RGW, fakes3, dualstack, etc.
+def is_fakes3(endpoint_url):
+ """ Return True if endpoint_url has scheme fakes3:// """
+ if endpoint_url is not None:
+ return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
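+ """Build a boto3 S3 client, handling Ceph/RGW endpoints, fakes3 URLs, KMS uploads (SigV4) and dualstack configuration."""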
+ if ceph: # TODO - test this
+ ceph = urlparse(endpoint_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+ region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ elif is_fakes3(endpoint_url):
+ fakes3 = urlparse(endpoint_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+ # This module's argument spec does not define 'mode' or 'encryption_mode', so use .get() to avoid a KeyError
+ if module.params.get('mode') == 'put' and module.params.get('encryption_mode') == 'aws:kms':
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ elif module.params.get('mode') in ('get', 'getstr') and sig_4:
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ if module.params['dualstack']:
+ dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+ if 'config' in params:
+ params['config'] = params['config'].merge(dualconf)
+ else:
+ params['config'] = dualconf
+ return boto3_conn(**params)
+
+
+def main():
+
+ argument_spec = dict(
+ object_details=dict(type='dict', options=dict(
+ object_acl=dict(type='bool', default=False),
+ object_legal_hold=dict(type='bool', default=False),
+ object_lock_configuration=dict(type='bool', default=False),
+ object_retention=dict(type='bool', default=False),
+ object_tagging=dict(type='bool', default=False),
+ object_attributes=dict(type='bool', default=False),
+ attributes_list=dict(type='list', elements='str', choices=['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize'])),
+ required_if=[
+ ("object_attributes", True, ["attributes_list"]),
+ ]
+ ),
+ bucket_name=dict(required=True, type='str'),
+ object_name=dict(type='str'),
+ dualstack=dict(default=False, type='bool'),
+ ceph=dict(default=False, type='bool', aliases=['rgw']),
+ )
+
+ required_if = [
+ ['ceph', True, ['endpoint_url']],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ )
+
+ bucket_name = module.params.get('bucket_name')
+ object_name = module.params.get('object_name')
+ requested_object_details = module.params.get('object_details')
+ endpoint_url = module.params.get('endpoint_url')
+ dualstack = module.params.get('dualstack')
+ ceph = module.params.get('ceph')
+
+ if not endpoint_url and 'S3_URL' in os.environ:
+ endpoint_url = os.environ['S3_URL']
+ module.deprecate(
+ "Support for the 'S3_URL' environment variable has been "
+ "deprecated. We recommend using the 'endpoint_url' module "
+ "parameter. Alternatively, the 'AWS_URL' environment variable can "
+ "be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url:
+ module.fail_json(msg='dualstack only applies to AWS S3')
+
+ result = []
+
+ if endpoint_url:
+ region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ connection = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url)
+ else:
+ try:
+ connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+ # check if specified bucket exists
+ bucket_check(connection, module, bucket_name)
+ # check if specified object exists
+ if object_name:
+ object_check(connection, module, bucket_name, object_name)
+
+ if requested_object_details and requested_object_details['object_attributes']:
+ module.require_botocore_at_least('1.24.7', reason='required for s3.get_object_attributes')
+
+ if requested_object_details:
+ if object_name:
+ object_details = get_object_details(connection, module, bucket_name, object_name, requested_object_details)
+ result.append(object_details)
+ else:
+ object_list = list_bucket_objects(connection, module, bucket_name)
+ for bucket_object in object_list:
+ result.append(get_object_details(connection, module, bucket_name, bucket_object, requested_object_details))
+
+ elif not requested_object_details and object_name:
+ # if specific details are not requested, return object metadata
+ object_details = get_object(connection, bucket_name, object_name)
+ result.append(object_details)
+ else:
+ # return list of all objects in a bucket if object name and object details not specified
+ object_list = list_bucket_objects(connection, module, bucket_name)
+ module.exit_json(s3_keys=object_list)
+
+ module.exit_json(object_info=result)
+
+
+if __name__ == '__main__':
+ main()