Diffstat (limited to 'collections-debian-merged/ansible_collections/amazon/aws/plugins')
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/action/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/action/aws_s3.py | 71
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py | 71
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py | 95
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py | 45
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py | 21
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py | 21
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py | 663
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py | 370
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py | 265
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py | 78
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py | 234
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/acm.py | 211
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/batch.py | 106
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloud.py | 220
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py | 231
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/core.py | 381
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py | 89
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/ec2.py | 807
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py | 109
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py | 919
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/iam.py | 76
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/rds.py | 235
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/s3.py | 83
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/urls.py | 212
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waf.py | 224
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waiters.py | 551
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py | 0
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py | 112
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py | 112
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py | 947
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py | 808
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py | 349
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py | 349
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py | 1740
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py | 761
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py | 287
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py | 287
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py | 1338
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py | 882
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py | 298
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py | 298
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py | 1380
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py | 148
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py | 148
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py | 267
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py | 563
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py | 322
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py | 248
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py | 248
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py | 200
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py | 88
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py | 809
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py | 195
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py | 195
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py | 413
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py | 144
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py | 535
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py | 599
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py | 229
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py | 229
-rw-r--r--  collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py | 876
73 files changed, 23826 insertions(+), 0 deletions(-)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/aws_s3.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/aws_s3.py
new file mode 100644
index 00000000..a454922a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/action/aws_s3.py
@@ -0,0 +1,71 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2018, Will Thames <will@thames.id.au>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for aws_s3 operations '''
+ self._supports_async = True
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+
+ try:
+ new_module_args = self._task.args.copy()
+ if source:
+ source = os.path.expanduser(source)
+
+ # For backward compatibility check if the file exists on the remote; it should take precedence
+ if not self._remote_file_exists(source):
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
+ new_module_args['src'] = source
+ except AnsibleFileNotFound as e:
+ # module handles error message for nonexistent files
+ new_module_args['src'] = source
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+ # execute the aws_s3 module with the updated args
+ result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ return result
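The action plugin above folds the module's return data into the base result with merge_hash. A minimal sketch of that merge behavior, using illustrative values rather than real plugin output:

    # merge_hash recursively merges two dicts; on key conflicts the second
    # argument wins, so the aws_s3 module result overrides the base result.
    from ansible.utils.vars import merge_hash

    base_result = {'changed': False, 'invocation': {'module_args': {'src': 'a.txt'}}}
    module_result = {'changed': True, 'msg': 'PUT operation complete'}

    print(merge_hash(base_result, module_result))
    # {'changed': True, 'invocation': {'module_args': {'src': 'a.txt'}}, 'msg': 'PUT operation complete'}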
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
new file mode 100644
index 00000000..9dae8e6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
@@ -0,0 +1,71 @@
+# (C) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: aws_resource_actions
+ type: aggregate
+ short_description: summarizes all "resource:actions" completed
+ description:
+ - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
+ AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
+ be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
+ requirements:
+ - whitelisting in configuration - see examples section below for details.
+'''
+
+EXAMPLES = '''
+example: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = aws_resource_actions
+sample output: >
+#
+# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
+# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
+# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
+#
+sample output: >
+#
+# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
+# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
+#
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils._text import to_native
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.8
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'amazon.aws.aws_resource_actions'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ self.aws_resource_actions = []
+ super(CallbackModule, self).__init__()
+
+ def extend_aws_resource_actions(self, result):
+ if result.get('resource_actions'):
+ self.aws_resource_actions.extend(result['resource_actions'])
+
+ def runner_on_ok(self, host, res):
+ self.extend_aws_resource_actions(res)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.extend_aws_resource_actions(res)
+
+ def v2_runner_item_on_ok(self, result):
+ self.extend_aws_resource_actions(result._result)
+
+ def v2_runner_item_on_failed(self, result):
+ self.extend_aws_resource_actions(result._result)
+
+ def playbook_on_stats(self, stats):
+ if self.aws_resource_actions:
+            self.aws_resource_actions = sorted(to_native(action) for action in set(self.aws_resource_actions))
+ self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
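At the end of a playbook, playbook_on_stats dedupes and sorts the collected action names before printing them. The same reduction in isolation, with sample action names:

    # Sketch of the dedupe-and-sort step from playbook_on_stats above.
    collected = ['s3:PutObject', 'ec2:CreateTags', 's3:PutObject', 'sts:GetCallerIdentity']
    print(sorted(set(collected)))
    # ['ec2:CreateTags', 's3:PutObject', 'sts:GetCallerIdentity']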
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
new file mode 100644
index 00000000..9eec9a8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Will Thames <will@thames.id.au>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # AWS only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ debug_botocore_endpoint_logs:
+ description:
+      - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputting
+        the set to the resource_actions key in the task results. Use the aws_resource_action callback to output the total list made during
+ a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
+ type: bool
+ default: 'no'
+ ec2_url:
+ description:
+      - URL to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
+ Ignored for modules where region is required. Must be specified for all other modules if region is not used.
+ If not set then the value of the EC2_URL environment variable, if any, is used.
+ type: str
+ aliases: [ aws_endpoint_url, endpoint_url ]
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
+ - If I(profile) is set this parameter is ignored.
+ - Passing the I(aws_secret_key) and I(profile) options at the same time has been deprecated
+ and the options will be made mutually exclusive after 2022-06-01.
+ type: str
+ aliases: [ ec2_secret_key, secret_key ]
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
+ - If I(profile) is set this parameter is ignored.
+ - Passing the I(aws_access_key) and I(profile) options at the same time has been deprecated
+ and the options will be made mutually exclusive after 2022-06-01.
+ type: str
+ aliases: [ ec2_access_key, access_key ]
+ security_token:
+ description:
+ - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
+ - If I(profile) is set this parameter is ignored.
+ - Passing the I(security_token) and I(profile) options at the same time has been deprecated
+ and the options will be made mutually exclusive after 2022-06-01.
+ type: str
+ aliases: [ aws_security_token, access_token ]
+ aws_ca_bundle:
+ description:
+ - "The location of a CA Bundle to use when validating SSL certificates."
+ - "Only used for boto3 based modules."
+ - "Note: The CA Bundle is read 'module' side and may need to be explicitly copied from the controller if not run locally."
+ type: path
+ validate_certs:
+ description:
+ - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+ type: bool
+ default: yes
+ profile:
+ description:
+ - Uses a boto profile. Only works with boto >= 2.24.0.
+ - Using I(profile) will override I(aws_access_key), I(aws_secret_key) and I(security_token)
+ and support for passing them at the same time as I(profile) has been deprecated.
+ - I(aws_access_key), I(aws_secret_key) and I(security_token) will be made mutually exclusive with I(profile) after 2022-06-01.
+ type: str
+ aliases: [ aws_profile ]
+ aws_config:
+ description:
+ - A dictionary to modify the botocore configuration.
+ - Parameters can be found at U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
+ - Only the 'user_agent' key is used for boto modules. See U(http://boto.cloudhackers.com/en/latest/boto_config_tut.html#boto) for more boto configuration.
+ type: dict
+requirements:
+ - python >= 2.6
+ - boto
+notes:
+ - If parameters are not set within the module, the following
+ environment variables can be used in decreasing order of precedence
+ C(AWS_URL) or C(EC2_URL),
+ C(AWS_PROFILE) or C(AWS_DEFAULT_PROFILE),
+ C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
+ C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
+ C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
+ C(AWS_REGION) or C(EC2_REGION),
+ C(AWS_CA_BUNDLE)
+ - Ansible uses the boto configuration file (typically ~/.boto) if no
+ credentials are provided. See https://boto.readthedocs.io/en/latest/boto_config_tut.html
+    - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
+ AWS region, when required, but this can also be configured in the boto config file
+'''
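The notes in this fragment define a strict precedence among environment variables. A sketch of resolving credentials in that documented order; first_env is a hypothetical helper, not part of the collection:

    import os

    # Hypothetical helper: the first set variable wins, e.g. AWS_ACCESS_KEY_ID
    # beats AWS_ACCESS_KEY beats EC2_ACCESS_KEY, per the precedence above.
    def first_env(*names):
        for name in names:
            value = os.environ.get(name)
            if value:
                return value
        return None

    access_key = first_env('AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY', 'EC2_ACCESS_KEY')
    secret_key = first_env('AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_KEY', 'EC2_SECRET_KEY')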
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
new file mode 100644
index 00000000..73eff046
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Plugin options for AWS credentials
+ DOCUMENTATION = r'''
+options:
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+'''
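Plugins that consume this fragment read the options with get_option() and hand them to boto3. A minimal sketch of that hand-off with placeholder values, mirroring the _get_credentials()/_get_connection() pattern used by the inventory plugins later in this diff:

    import boto3

    # Map the fragment's options onto boto3 keyword arguments (placeholders only).
    credentials = {
        'aws_access_key_id': 'AKIA...PLACEHOLDER',   # aws_access_key
        'aws_secret_access_key': 'PLACEHOLDER',      # aws_secret_key
        'aws_session_token': 'PLACEHOLDER',          # aws_security_token
    }
    client = boto3.session.Session(profile_name=None).client('ec2', 'us-east-1', **credentials)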
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
new file mode 100644
index 00000000..52152660
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Plugin option for AWS region
+ DOCUMENTATION = r'''
+options:
+ region:
+ description: The region for which to create the connection.
+ type: str
+ env:
+ - name: EC2_REGION
+ - name: AWS_REGION
+'''
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
new file mode 100644
index 00000000..09613882
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # EC2 only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
+ See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
+ type: str
+ aliases: [ aws_region, ec2_region ]
+'''
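When the region option is unset everywhere, callers still need a region list to iterate over; the aws_ec2 inventory plugin below falls back to describe_regions and then to botocore's bundled endpoint data. The bundled-list fallback in isolation:

    import boto3

    # Last-resort region discovery, as used by _boto3_conn() in the inventory
    # plugin below: reads the static endpoints data shipped with botocore.
    session = boto3.Session()
    print(session.get_available_regions('ec2'))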
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
new file mode 100644
index 00000000..01f9de03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -0,0 +1,663 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_ec2
+ plugin_type: inventory
+ short_description: EC2 inventory source
+ requirements:
+ - boto3
+ - botocore
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ - amazon.aws.aws_credentials
+
+ description:
+ - Get inventory hosts from Amazon Web Services EC2.
+ - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
+ notes:
+ - If no credentials are provided and the control node has an associated IAM instance profile then the
+ role will be used for authentication.
+ author:
+ - Sloane Hertel (@s-hertel)
+ options:
+ plugin:
+ description: Token that ensures this is a source file for the plugin.
+ required: True
+ choices: ['aws_ec2', 'amazon.aws.aws_ec2']
+ iam_role_arn:
+ description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+ credentials with enough privilege to perform the AssumeRole action.
+ regions:
+ description:
+ - A list of regions in which to describe EC2 instances.
+              - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+ type: list
+ default: []
+ hostnames:
+ description:
+ - A list in order of precedence for hostname variables.
+ - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+ type: list
+ default: []
+ filters:
+ description:
+ - A dictionary of filter value pairs.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ type: dict
+ default: {}
+ include_extra_api_calls:
+ description:
+ - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+ - Spot instances may be persistent and instances may have associated events.
+ type: bool
+ default: False
+ strict_permissions:
+ description:
+ - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+ - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+ type: bool
+ default: True
+ use_contrib_script_compatible_sanitization:
+ description:
+            - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
+              This option allows you to override that, in an effort to allow migration from the old inventory script and
+              matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+              To replicate the behavior of ``replace_dash_in_groups = True`` with constructed groups,
+ you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+ - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+ otherwise the core engine will just use the standard sanitization on top.
+            - This is not the default, as such names break certain functionality: not all characters are valid in Python identifiers,
+              which group names end up being used as.
+ type: bool
+ default: False
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: aws_ec2
+regions:
+ - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+ - us-east-1
+ - us-east-2
+filters:
+ # All instances with their `Environment` tag set to `dev`
+ tag:Environment: dev
+ # All dev and QA hosts
+ tag:Environment:
+ - dev
+ - qa
+ instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
+hostnames:
+ - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+ - tag:CustomDNSName
+ - dns-name
+ - name: 'tag:Name=Tag1,Name=Tag2'
+ - name: 'private-ip-address'
+ separator: '_'
+ prefix: 'tag:Name'
+
+# Example using constructed features to create groups and set ansible_host
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+ # Add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'architecture'
+ # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+ - prefix: tag
+ key: tags
+ # Add hosts to e.g. instance_type_z3_tiny
+ - prefix: instance_type
+ key: instance_type
+ # Create security_groups_sg_abcd1234 group for each SG
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ # Create a group for each value of the Application tag
+ - key: tags.Application
+ separator: ''
+ # Create a group per region e.g. aws_region_us_east_2
+ - key: placement.region
+ prefix: aws_region
+ # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+ - key: tags['Role']
+ prefix: foo
+ parent_group: "project"
+# Set individual variables with compose
+compose:
+ # Use the private IP address to connect to the host
+ # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+ ansible_host: private_ip_address
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
+
+# The mappings give an array of keys to get from the filter name to the value
+# returned by boto3's EC2 describe_instances method.
+
+instance_meta_filter_to_boto_attr = {
+ 'group-id': ('Groups', 'GroupId'),
+ 'group-name': ('Groups', 'GroupName'),
+ 'network-interface.attachment.instance-owner-id': ('OwnerId',),
+ 'owner-id': ('OwnerId',),
+ 'requester-id': ('RequesterId',),
+ 'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+ 'affinity': ('Placement', 'Affinity'),
+ 'architecture': ('Architecture',),
+ 'availability-zone': ('Placement', 'AvailabilityZone'),
+ 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+ 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+ 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+ 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+ 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+ 'client-token': ('ClientToken',),
+ 'dns-name': ('PublicDnsName',),
+ 'host-id': ('Placement', 'HostId'),
+ 'hypervisor': ('Hypervisor',),
+ 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+ 'image-id': ('ImageId',),
+ 'instance-id': ('InstanceId',),
+ 'instance-lifecycle': ('InstanceLifecycle',),
+ 'instance-state-code': ('State', 'Code'),
+ 'instance-state-name': ('State', 'Name'),
+ 'instance-type': ('InstanceType',),
+ 'instance.group-id': ('SecurityGroups', 'GroupId'),
+ 'instance.group-name': ('SecurityGroups', 'GroupName'),
+ 'ip-address': ('PublicIpAddress',),
+ 'kernel-id': ('KernelId',),
+ 'key-name': ('KeyName',),
+ 'launch-index': ('AmiLaunchIndex',),
+ 'launch-time': ('LaunchTime',),
+ 'monitoring-state': ('Monitoring', 'State'),
+ 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+ 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+ 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+ 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+ 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+ 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+ 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+ 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+ 'network-interface.attachment.instance-id': ('InstanceId',),
+ 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+ 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+ 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+ 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+ 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+ 'network-interface.description': ('NetworkInterfaces', 'Description'),
+ 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+ 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+ 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+ 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+ 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+ 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+ 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+ # 'network-interface.requester-id': (),
+ 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.status': ('NetworkInterfaces', 'Status'),
+ 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+ 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+ 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+ 'placement-group-name': ('Placement', 'GroupName'),
+ 'platform': ('Platform',),
+ 'private-dns-name': ('PrivateDnsName',),
+ 'private-ip-address': ('PrivateIpAddress',),
+ 'product-code': ('ProductCodes', 'ProductCodeId'),
+ 'product-code.type': ('ProductCodes', 'ProductCodeType'),
+ 'ramdisk-id': ('RamdiskId',),
+ 'reason': ('StateTransitionReason',),
+ 'root-device-name': ('RootDeviceName',),
+ 'root-device-type': ('RootDeviceType',),
+ 'source-dest-check': ('SourceDestCheck',),
+ 'spot-instance-request-id': ('SpotInstanceRequestId',),
+ 'state-reason-code': ('StateReason', 'Code'),
+ 'state-reason-message': ('StateReason', 'Message'),
+ 'subnet-id': ('SubnetId',),
+ 'tag': ('Tags',),
+ 'tag-key': ('Tags',),
+ 'tag-value': ('Tags',),
+ 'tenancy': ('Placement', 'Tenancy'),
+ 'virtualization-type': ('VirtualizationType',),
+ 'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'amazon.aws.aws_ec2'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ self.group_prefix = 'aws_ec2_'
+
+ # credentials
+ self.boto_profile = None
+ self.aws_secret_access_key = None
+ self.aws_access_key_id = None
+ self.aws_security_token = None
+ self.iam_role_arn = None
+
+ def _compile_values(self, obj, attr):
+ '''
+ :param obj: A list or dict of instance attributes
+ :param attr: A key
+ :return The value(s) found via the attr
+ '''
+ if obj is None:
+ return
+
+ temp_obj = []
+
+ if isinstance(obj, list) or isinstance(obj, tuple):
+ for each in obj:
+ value = self._compile_values(each, attr)
+ if value:
+ temp_obj.append(value)
+ else:
+ temp_obj = obj.get(attr)
+
+ has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
+ if has_indexes and len(temp_obj) == 1:
+ return temp_obj[0]
+
+ return temp_obj
+
+ def _get_boto_attr_chain(self, filter_name, instance):
+ '''
+ :param filter_name: The filter
+ :param instance: instance dict returned by boto3 ec2 describe_instances()
+ '''
+ allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+ if filter_name not in allowed_filters:
+ raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
+ allowed_filters))
+ if filter_name in instance_data_filter_to_boto_attr:
+ boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+ else:
+ boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+ instance_value = instance
+ for attribute in boto_attr_list:
+ instance_value = self._compile_values(instance_value, attribute)
+ return instance_value
+
+ def _get_credentials(self):
+ '''
+ :return A dictionary of boto client credentials
+ '''
+ boto_params = {}
+ for credential in (('aws_access_key_id', self.aws_access_key_id),
+ ('aws_secret_access_key', self.aws_secret_access_key),
+ ('aws_session_token', self.aws_security_token)):
+ if credential[1]:
+ boto_params[credential[0]] = credential[1]
+
+ return boto_params
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+
+ credentials = self._get_credentials()
+ iam_role_arn = self.iam_role_arn
+
+ if not regions:
+ try:
+ # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+ client = self._get_connection(credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+                # above seems to fail depending on boto3 version, ignore and let's try something else
+ pass
+
+ # fallback to local list hardcoded in boto3 if still no regions
+ if not regions:
+ session = boto3.Session()
+ regions = session.get_available_regions('ec2')
+
+ # I give up, now you MUST give me regions
+ if not regions:
+ raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+ for region in regions:
+ connection = self._get_connection(credentials, region)
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_instances_by_region(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions in which to describe instances
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ :return A list of instance dictionaries
+ '''
+ all_instances = []
+
+ for connection, region in self._boto3_conn(regions):
+ try:
+ # By default find non-terminated/terminating instances
+ if not any([f['Name'] == 'instance-state-name' for f in filters]):
+ filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+ paginator = connection.get_paginator('describe_instances')
+ reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+ instances = []
+ for r in reservations:
+ new_instances = r['Instances']
+ for instance in new_instances:
+ instance.update(self._get_reservation_details(r))
+ if self.get_option('include_extra_api_calls'):
+ instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
+ instances.extend(new_instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+ instances = []
+ else:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+ except botocore.exceptions.BotoCoreError as e:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+ all_instances.extend(instances)
+
+ return sorted(all_instances, key=lambda x: x['InstanceId'])
+
+ def _get_reservation_details(self, reservation):
+ return {
+ 'OwnerId': reservation['OwnerId'],
+ 'RequesterId': reservation.get('RequesterId', ''),
+ 'ReservationId': reservation['ReservationId']
+ }
+
+ def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
+ host_vars = {'Events': '', 'Persistent': False}
+ try:
+ kwargs = {'InstanceIds': [instance_id]}
+ host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
+ if spot_instance:
+ try:
+ kwargs = {'SpotInstanceRequestIds': [spot_instance]}
+ host_vars['Persistent'] = bool(
+ connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
+ return host_vars
+
+ def _get_tag_hostname(self, preference, instance):
+ tag_hostnames = preference.split('tag:', 1)[1]
+ if ',' in tag_hostnames:
+ tag_hostnames = tag_hostnames.split(',')
+ else:
+ tag_hostnames = [tag_hostnames]
+ tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+ for v in tag_hostnames:
+ if '=' in v:
+ tag_name, tag_value = v.split('=')
+ if tags.get(tag_name) == tag_value:
+ return to_text(tag_name) + "_" + to_text(tag_value)
+ else:
+ tag_value = tags.get(v)
+ if tag_value:
+ return to_text(tag_value)
+ return None
+
+ def _get_hostname(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables in order of preference
+        :return the preferred identifier for the host
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ for preference in hostnames:
+ if isinstance(preference, dict):
+ if 'name' not in preference:
+ raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+ hostname = self._get_hostname(instance, [preference["name"]])
+                hostname_from_prefix = self._get_hostname(instance, [preference["prefix"]]) if 'prefix' in preference else None
+ separator = preference.get("separator", "_")
+ if hostname and hostname_from_prefix and 'prefix' in preference:
+ hostname = hostname_from_prefix + separator + hostname
+ elif preference.startswith('tag:'):
+ hostname = self._get_tag_hostname(preference, instance)
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+ if hostname:
+ break
+ if hostname:
+ if ':' in to_text(hostname):
+                return self._sanitize_group_name(to_text(hostname))
+ else:
+ return to_text(hostname)
+
+ def _query(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions to query
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ '''
+ return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
+
+ def _populate(self, groups, hostnames):
+ for group in groups:
+ group = self.inventory.add_group(group)
+ self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
+ self.inventory.add_child('all', group)
+
+ def _add_hosts(self, hosts, group, hostnames):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ :param hostnames: a list of hostname destination variables in order of preference
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host, hostnames)
+
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
+ host['placement']['region'] = host['placement']['availability_zone'][:-1]
+
+ if not hostname:
+ continue
+ self.inventory.add_host(hostname, group=group)
+ for hostvar, hostval in host.items():
+ self.inventory.set_variable(hostname, hostvar, hostval)
+
+ # Use constructed if applicable
+
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def _set_credentials(self):
+ '''
+        Set the credential attributes from the inventory configuration options, falling back to the botocore session credentials.
+ '''
+
+ self.boto_profile = self.get_option('aws_profile')
+ self.aws_access_key_id = self.get_option('aws_access_key')
+ self.aws_secret_access_key = self.get_option('aws_secret_key')
+ self.aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ session = botocore.session.get_session()
+ try:
+ credentials = session.get_credentials().get_frozen_credentials()
+ except AttributeError:
+ pass
+ else:
+ self.aws_access_key_id = credentials.access_key
+ self.aws_secret_access_key = credentials.secret_key
+ self.aws_security_token = credentials.token
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ def verify_file(self, path):
+ '''
+        :param path: the path to the inventory config file
+        :return True if the file name matches an accepted pattern, otherwise False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+ return True
+ self.display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+
+ if self.get_option('use_contrib_script_compatible_sanitization'):
+ self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
+ hostnames = self.get_option('hostnames')
+ strict_permissions = self.get_option('strict_permissions')
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query(regions, filters, strict_permissions)
+
+ self._populate(results, hostnames)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+
+ @staticmethod
+ def _legacy_script_compatible_group_sanitization(name):
+
+ # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
+ regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+ return regex.sub('_', name)
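The tag:Name=Value hostname syntax handled by _get_tag_hostname above can be exercised standalone. This paraphrase keeps the same semantics; the tag names and values are illustrative:

    # 'tag:A=B,C=D' returns 'A_B' for the first matching name/value pair;
    # 'tag:A' (no '=') returns the value of tag A instead.
    def tag_hostname(preference, tags):
        for candidate in preference.split('tag:', 1)[1].split(','):
            if '=' in candidate:
                name, value = candidate.split('=')
                if tags.get(name) == value:
                    return '%s_%s' % (name, value)
            elif tags.get(candidate):
                return tags[candidate]
        return None

    print(tag_hostname('tag:Environment=dev,Name=web01', {'Environment': 'dev'}))  # Environment_dev
    print(tag_hostname('tag:Name', {'Name': 'web01'}))                             # web01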
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
new file mode 100644
index 00000000..bd30db2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
@@ -0,0 +1,370 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_rds
+ plugin_type: inventory
+    short_description: RDS instance source
+ description:
+ - Get instances and clusters from Amazon Web Services RDS.
+ - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).
+ options:
+ regions:
+ description: A list of regions in which to describe RDS instances and clusters. Available regions are listed here
+ U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)
+ default: []
+ filters:
+ description: A dictionary of filter value pairs. Available filters are listed here
+ U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
+ db-cluster-id and I(include_clusters) is True it will apply to clusters as well.
+ default: {}
+ strict_permissions:
+ description: By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
+ False in the inventory config file which will allow the restrictions to be gracefully skipped.
+ type: bool
+ default: True
+ include_clusters:
+ description: Whether or not to query for Aurora clusters as well as instances
+ type: bool
+ default: False
+ statuses:
+ description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
+ type: list
+ default:
+ - creating
+ - available
+ iam_role_arn:
+ description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide
+ AWS credentials with enough privilege to perform the AssumeRole action.
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ - amazon.aws.aws_credentials
+
+ requirements:
+ - boto3
+ - botocore
+ author: Sloane Hertel (@s-hertel)
+'''
+
+EXAMPLES = '''
+plugin: aws_rds
+regions:
+ - us-east-1
+ - ca-central-1
+keyed_groups:
+ - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
+ prefix: rds_parameter_group
+ - key: engine
+ prefix: rds
+ - key: tags
+ - key: region
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError('The RDS dynamic inventory plugin requires boto3 and botocore.')
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'amazon.aws.aws_rds'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+ self.credentials = {}
+ self.boto_profile = None
+ self.iam_role_arn = None
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+ iam_role_arn = self.iam_role_arn
+ credentials = self.credentials
+ for region in regions:
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_hosts_by_region(self, connection, filters, strict):
+
+ def _add_tags_for_hosts(connection, hosts, strict):
+ for host in hosts:
+ if 'DBInstanceArn' in host:
+ resource_arn = host['DBInstanceArn']
+ else:
+ resource_arn = host['DBClusterArn']
+
+ try:
+ tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
+ except is_boto3_error_code('AccessDenied') as e:
+ if not strict:
+ tags = []
+ else:
+ raise e
+ host['Tags'] = tags
+
+ def wrapper(f, *args, **kwargs):
+ try:
+ results = f(*args, **kwargs)
+ if 'DBInstances' in results:
+ results = results['DBInstances']
+ else:
+ results = results['DBClusters']
+ _add_tags_for_hosts(connection, results, strict)
+ except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
+ if not strict:
+ results = []
+ else:
+ raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
+ return results
+ return wrapper
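+
+    # Usage sketch (editorial note, not part of the plugin): the method above
+    # returns a closure, so the boto3 callable and its arguments are supplied
+    # on a second call, e.g.
+    #
+    #     fetch = self._get_hosts_by_region(connection, filters, strict)
+    #     hosts = fetch(paginator.paginate(Filters=filters).build_full_result)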
+
+    def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):
+        '''
+        :param regions: a list of regions in which to describe hosts
+        :param instance_filters: a list of boto3 filter dictionaries
+        :param cluster_filters: a list of boto3 filter dictionaries
+        :param strict: a boolean determining whether to fail or ignore 403 error codes
+        :param statuses: a list of statuses that the returned hosts should match
+        :param gather_clusters: a boolean determining whether to also describe DB clusters
+        :return: a list of host dictionaries
+        '''
+ all_instances = []
+ all_clusters = []
+ for connection, region in self._boto3_conn(regions):
+ paginator = connection.get_paginator('describe_db_instances')
+ all_instances.extend(
+ self._get_hosts_by_region(connection, instance_filters, strict)
+ (paginator.paginate(Filters=instance_filters).build_full_result)
+ )
+ if gather_clusters:
+ all_clusters.extend(
+ self._get_hosts_by_region(connection, cluster_filters, strict)
+ (connection.describe_db_clusters, **{'Filters': cluster_filters})
+ )
+ sorted_hosts = list(
+ sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +
+ sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])
+ )
+ return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)
+
+ def find_hosts_with_valid_statuses(self, hosts, statuses):
+ if 'all' in statuses:
+ return hosts
+ valid_hosts = []
+ for host in hosts:
+ if host.get('DBInstanceStatus') in statuses:
+ valid_hosts.append(host)
+ elif host.get('Status') in statuses:
+ valid_hosts.append(host)
+ return valid_hosts
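+
+    # Editorial example (hypothetical hosts):
+    #
+    #     find_hosts_with_valid_statuses(
+    #         [{'DBInstanceStatus': 'available'}, {'Status': 'stopped'}],
+    #         ['available'])
+    #
+    # returns [{'DBInstanceStatus': 'available'}]; statuses == ['all'] keeps
+    # every host.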
+
+ def _populate(self, hosts):
+ group = 'aws_rds'
+ self.inventory.add_group(group)
+ if hosts:
+ self._add_hosts(hosts=hosts, group=group)
+ self.inventory.add_child('all', group)
+
+ def _populate_from_source(self, source_data):
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+
+ def _get_hostname(self, host):
+ if host.get('DBInstanceIdentifier'):
+ return host['DBInstanceIdentifier']
+ else:
+ return host['DBClusterIdentifier']
+
+ def _format_inventory(self, hosts):
+ results = {'_meta': {'hostvars': {}}}
+ group = 'aws_rds'
+ results[group] = {'hosts': []}
+ for host in hosts:
+ hostname = self._get_hostname(host)
+ results[group]['hosts'].append(hostname)
+ h = self.inventory.get_host(hostname)
+ results['_meta']['hostvars'][h.name] = h.vars
+ return results
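+
+    # Editorial sketch of the returned structure (hypothetical identifiers):
+    #
+    #     {'_meta': {'hostvars': {'app-db': {'region': 'us-east-1', ...}}},
+    #      'aws_rds': {'hosts': ['app-db']}}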
+
+ def _add_hosts(self, hosts, group):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host)
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
+ if 'availability_zone' in host:
+ host['region'] = host['availability_zone'][:-1]
+ elif 'availability_zones' in host:
+ host['region'] = host['availability_zones'][0][:-1]
+
+ self.inventory.add_host(hostname, group=group)
+ for hostvar, hostval in host.items():
+ self.inventory.set_variable(hostname, hostvar, hostval)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
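+
+    # Editorial sketch: with an inventory config containing
+    #
+    #     keyed_groups:
+    #       - key: region
+    #         prefix: rds_region
+    #
+    # a host in us-east-1 lands in a generated group such as
+    # 'rds_region_us_east_1' (the exact name depends on Ansible's
+    # group-name sanitization settings).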
+
+    def _set_credentials(self):
+        '''
+        Populate self.credentials, self.boto_profile and self.iam_role_arn
+        from the inventory configuration options.
+        '''
+ self.boto_profile = self.get_option('aws_profile')
+ aws_access_key_id = self.get_option('aws_access_key')
+ aws_secret_access_key = self.get_option('aws_secret_key')
+ aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
+ session = botocore.session.get_session()
+ if session.get_credentials() is not None:
+ aws_access_key_id = session.get_credentials().access_key
+ aws_secret_access_key = session.get_credentials().secret_key
+ aws_security_token = session.get_credentials().token
+
+ if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ if aws_access_key_id:
+ self.credentials['aws_access_key_id'] = aws_access_key_id
+ if aws_secret_access_key:
+ self.credentials['aws_secret_access_key'] = aws_secret_access_key
+ if aws_security_token:
+ self.credentials['aws_session_token'] = aws_security_token
+
+    def verify_file(self, path):
+        '''
+        :param path: the path to the inventory config file
+        :return: True if the file name is acceptable for this plugin, otherwise False
+        '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):
+ return True
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ config_data = self._read_config_data(path)
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = self.get_option('filters')
+ strict_permissions = self.get_option('strict_permissions')
+ statuses = self.get_option('statuses')
+ include_clusters = self.get_option('include_clusters')
+ instance_filters = ansible_dict_to_boto3_filter_list(filters)
+ cluster_filters = []
+ if 'db-cluster-id' in filters and include_clusters:
+ cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ formatted_inventory = {}
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+ else:
+ self._populate_from_source(results)
+
+ if not cache or cache_needs_update:
+ results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)
+ self._populate(results)
+ formatted_inventory = self._format_inventory(results)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = formatted_inventory
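+
+# A minimal example inventory config (editorial sketch; the file name must
+# satisfy verify_file(), i.e. end in aws_rds.yml or aws_rds.yaml):
+#
+#     plugin: amazon.aws.aws_rds
+#     regions:
+#       - us-east-1
+#     include_clusters: true
+#     statuses:
+#       - available
+#     keyed_groups:
+#       - key: region
+#         prefix: rds_region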
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py
new file mode 100644
index 00000000..ca9c57c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py
@@ -0,0 +1,128 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: aws_account_attribute
+author:
+ - Sloane Hertel <shertel@redhat.com>
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+- amazon.aws.aws_credentials
+- amazon.aws.aws_region
+
+short_description: Look up AWS account attributes.
+description:
+ - Describes attributes of your AWS account. You can specify one of the listed
+ attribute choices or omit it to see all attributes.
+options:
+ attribute:
+ description: The attribute for which to get the value(s).
+ choices:
+ - supported-platforms
+ - default-vpc
+ - max-instances
+ - vpc-max-security-groups-per-interface
+ - max-elastic-ips
+ - vpc-max-elastic-ips
+ - has-ec2-classic
+'''
+
+EXAMPLES = """
+vars:
+ has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}"
+ # true | false
+
+ default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}"
+ # vpc-xxxxxxxx | none
+
+ account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}"
+ # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'],
+ # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']}
+
+"""
+
+RETURN = """
+_raw:
+ description:
+    Returns a boolean when I(attribute) is C(has-ec2-classic). Otherwise returns the value(s) of the attribute
+    (or all attributes if one is not specified).
+"""
+
+from ansible.errors import AnsibleError
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError("The lookup aws_account_attribute requires boto3 and botocore.")
+
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+
+def _boto3_conn(region, credentials):
+ boto_profile = credentials.pop('aws_profile', None)
+
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found.")
+ else:
+ raise AnsibleError("Insufficient credentials found.")
+ return connection
+
+
+def _get_credentials(options):
+ credentials = {}
+ credentials['aws_profile'] = options['aws_profile']
+ credentials['aws_secret_access_key'] = options['aws_secret_key']
+ credentials['aws_access_key_id'] = options['aws_access_key']
+ if options['aws_security_token']:
+ credentials['aws_session_token'] = options['aws_security_token']
+
+ return credentials
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+ boto_credentials = _get_credentials(self._options)
+
+ region = self._options['region']
+ client = _boto3_conn(region, boto_credentials)
+
+ attribute = kwargs.get('attribute')
+ params = {'AttributeNames': []}
+ check_ec2_classic = False
+ if 'has-ec2-classic' == attribute:
+ check_ec2_classic = True
+ params['AttributeNames'] = ['supported-platforms']
+ elif attribute:
+ params['AttributeNames'] = [attribute]
+
+ try:
+ response = client.describe_account_attributes(**params)['AccountAttributes']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise AnsibleError("Failed to describe account attributes: %s" % to_native(e))
+
+ if check_ec2_classic:
+ attr = response[0]
+ return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues'])
+
+ if attribute:
+ attr = response[0]
+ return [value['AttributeValue'] for value in attr['AttributeValues']]
+
+ flattened = {}
+ for k_v_dict in response:
+ flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']]
+ return flattened
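+
+# Editorial sketch of the describe_account_attributes() response shape that
+# the flattening above assumes (hypothetical values):
+#
+#     [{'AttributeName': 'max-instances',
+#       'AttributeValues': [{'AttributeValue': '20'}]},
+#      {'AttributeName': 'supported-platforms',
+#       'AttributeValues': [{'AttributeValue': 'VPC'}, {'AttributeValue': 'EC2'}]}]
+#
+# which flattens to {'max-instances': ['20'], 'supported-platforms': ['VPC', 'EC2']}.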
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
new file mode 100644
index 00000000..39c49890
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
@@ -0,0 +1,265 @@
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+lookup: aws_secret
+author:
+ - Aaron Smith <ajsmith10381@gmail.com>
+requirements:
+ - boto3
+ - botocore>=1.10.0
+extends_documentation_fragment:
+- amazon.aws.aws_credentials
+- amazon.aws.aws_region
+
+short_description: Look up secrets stored in AWS Secrets Manager.
+description:
+ - Look up secrets stored in AWS Secrets Manager provided the caller
+ has the appropriate permissions to read the secret.
+ - Lookup is based on the secret's I(Name) value.
+  - Optional parameters can be passed into this lookup: I(version_id) and I(version_stage).
+options:
+ _terms:
+ description: Name of the secret to look up in AWS Secrets Manager.
+ required: True
+ bypath:
+ description: A boolean to indicate whether the parameter is provided as a hierarchy.
+ default: false
+ type: boolean
+ version_added: 1.4.0
+ nested:
+ description: A boolean to indicate the secret contains nested values.
+ type: boolean
+ default: false
+ version_added: 1.4.0
+ version_id:
+ description: Version of the secret(s).
+ required: False
+ version_stage:
+ description: Stage of the secret version.
+ required: False
+ join:
+ description:
+ - Join two or more entries to form an extended secret.
+ - This is useful for overcoming the 4096 character limit imposed by AWS.
+ - No effect when used with I(bypath).
+ type: boolean
+ default: false
+ on_missing:
+ description:
+ - Action to take if the secret is missing.
+ - C(error) will raise a fatal error when the secret is missing.
+ - C(skip) will silently ignore the missing secret.
+ - C(warn) will skip over the missing secret but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ on_denied:
+ description:
+ - Action to take if access to the secret is denied.
+ - C(error) will raise a fatal error when access to the secret is denied.
+ - C(skip) will silently ignore the denied secret.
+ - C(warn) will skip over the denied secret but issue a warning.
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+'''
+
+EXAMPLES = r"""
+ - name: lookup secretsmanager secret in the current region
+ debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}"
+
+ - name: Create RDS instance with aws_secret lookup for password param
+ rds:
+ command: create
+ instance_name: app-db
+ db_engine: MySQL
+ size: 10
+ instance_type: db.m1.small
+ username: dbadmin
+ password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}"
+ tags:
+ Environment: staging
+
+ - name: skip if secret does not exist
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}"
+
+ - name: warn if access to the secret is denied
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}"
+
+ - name: lookup secretsmanager secret in the current region using the nested feature
+ debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}"
+ # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`.
+ # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`.
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ Returns the value of the secret stored in AWS Secrets Manager.
+"""
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError("The lookup aws_secret requires boto3 and botocore.")
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+import json
+
+
+def _boto3_conn(region, credentials):
+ boto_profile = credentials.pop('aws_profile', None)
+
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found.")
+ else:
+ raise AnsibleError("Insufficient credentials found.")
+ return connection
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
+ aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
+ bypath=False, nested=False, join=False, version_stage=None, version_id=None, on_missing='error',
+ on_denied='error'):
+ '''
+ :arg terms: a list of lookups to run.
+ e.g. ['parameter_name', 'parameter_name_too' ]
+ :kwarg variables: ansible variables active at the time of the lookup
+            :kwarg aws_access_key: AWS access key ID to use
+            :kwarg aws_secret_key: AWS secret access key (matching the access key)
+            :kwarg aws_security_token: AWS session token if using STS
+ :kwarg region: AWS region in which to do the lookup
+ :kwarg bypath: Set to True to do a lookup of variables under a path
+ :kwarg nested: Set to True to do a lookup of nested secrets
+ :kwarg join: Join two or more entries to form an extended secret
+ :kwarg version_stage: Stage of the secret version
+ :kwarg version_id: Version of the secret(s)
+ :kwarg on_missing: Action to take if the secret is missing
+ :kwarg on_denied: Action to take if access to the secret is denied
+ :returns: A list of parameter values or a list of dictionaries if bypath=True.
+ '''
+ if not HAS_BOTO3:
+            raise AnsibleError('botocore and boto3 are required for aws_secret lookup.')
+
+ missing = on_missing.lower()
+ if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing)
+
+ denied = on_denied.lower()
+ if not isinstance(denied, string_types) or denied not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % denied)
+
+ credentials = {}
+ if aws_profile:
+ credentials['aws_profile'] = aws_profile
+ else:
+ credentials['aws_profile'] = boto_profile
+ credentials['aws_secret_access_key'] = aws_secret_key
+ credentials['aws_access_key_id'] = aws_access_key
+ credentials['aws_session_token'] = aws_security_token
+
+ # fallback to IAM role credentials
+ if not credentials['aws_profile'] and not (
+ credentials['aws_access_key_id'] and credentials['aws_secret_access_key']):
+ session = botocore.session.get_session()
+ if session.get_credentials() is not None:
+ credentials['aws_access_key_id'] = session.get_credentials().access_key
+ credentials['aws_secret_access_key'] = session.get_credentials().secret_key
+ credentials['aws_session_token'] = session.get_credentials().token
+
+ client = _boto3_conn(region, credentials)
+
+ if bypath:
+ secrets = {}
+ for term in terms:
+ try:
+ response = client.list_secrets(Filters=[{'Key': 'name', 'Values': [term]}])
+
+ if 'SecretList' in response:
+ for secret in response['SecretList']:
+ secrets.update({secret['Name']: self.get_secret_value(secret['Name'], client,
+ on_missing=missing,
+ on_denied=denied)})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
+ secrets = [secrets]
+ else:
+ secrets = []
+ for term in terms:
+ value = self.get_secret_value(term, client,
+ version_stage=version_stage, version_id=version_id,
+ on_missing=missing, on_denied=denied, nested=nested)
+ if value:
+ secrets.append(value)
+ if join:
+ joined_secret = []
+ joined_secret.append(''.join(secrets))
+ return joined_secret
+
+ return secrets
+
+ def get_secret_value(self, term, client, version_stage=None, version_id=None, on_missing=None, on_denied=None, nested=False):
+ params = {}
+ params['SecretId'] = term
+ if version_id:
+ params['VersionId'] = version_id
+ if version_stage:
+ params['VersionStage'] = version_stage
+ if nested:
+ if len(term.split('.')) < 2:
+                raise AnsibleError("Nested query must use the following syntax: `aws_secret_name.<key_name>.<key_name>`")
+ secret_name = term.split('.')[0]
+ params['SecretId'] = secret_name
+
+ try:
+ response = client.get_secret_value(**params)
+ if 'SecretBinary' in response:
+ return response['SecretBinary']
+ if 'SecretString' in response:
+ if nested:
+ secrets = []
+ query = term.split('.')[1:]
+ secret_string = json.loads(response['SecretString'])
+ ret_val = secret_string
+ for key in query:
+ if key in ret_val:
+ ret_val = ret_val[key]
+ else:
+                            raise AnsibleError("Successfully retrieved secret but it contains no key {0}".format(key))
+ return str(ret_val)
+ else:
+ return response['SecretString']
+ except is_boto3_error_code('ResourceNotFoundException'):
+ if on_missing == 'error':
+ raise AnsibleError("Failed to find secret %s (ResourceNotFound)" % term)
+ elif on_missing == 'warn':
+ self._display.warning('Skipping, did not find secret %s' % term)
+ except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except
+ if on_denied == 'error':
+ raise AnsibleError("Failed to access secret %s (AccessDenied)" % term)
+ elif on_denied == 'warn':
+ self._display.warning('Skipping, access denied for secret %s' % term)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
+
+ return None
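+
+# Editorial sketch of the nested traversal above: for a secret named
+# 'secrets' whose SecretString is
+#
+#     {"environments": {"production": {"password": "p4ss"}}}
+#
+# the term 'secrets.environments.production.password' sets SecretId to
+# 'secrets', walks query == ['environments', 'production', 'password'],
+# and returns 'p4ss'. (All values here are hypothetical.)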
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
new file mode 100644
index 00000000..6a66120d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
@@ -0,0 +1,78 @@
+# (c) 2016 James Turner <turnerjsm@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: aws_service_ip_ranges
+author:
+ - James Turner <turnerjsm@gmail.com>
+requirements:
+ - must have public internet connectivity
+short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
+description:
+ - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
+ - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
+options:
+ service:
+    description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
+ region:
+ description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
+'''
+
+EXAMPLES = """
+vars:
+ ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
+tasks:
+
+- name: "use list return option and iterate as a loop"
+ debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
+# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
+
+- name: "Pull S3 IP ranges, and print the default return style"
+ debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
+# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
+"""
+
+RETURN = """
+_raw:
+ description: comma-separated list of CIDR ranges
+"""
+
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ try:
+ resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
+ amazon_response = json.load(resp)['prefixes']
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
+ # on Python 3+, json.decoder.JSONDecodeError is raised for bad
+ # JSON. On 2.x it's a ValueError
+ raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
+ except HTTPError as e:
+ raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
+ except SSLValidationError as e:
+ raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
+ except URLError as e:
+            raise AnsibleError("Failed to look up IP range service: %s" % to_native(e))
+ except ConnectionError as e:
+ raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
+
+ if 'region' in kwargs:
+ region = kwargs['region']
+ amazon_response = (item for item in amazon_response if item['region'] == region)
+ if 'service' in kwargs:
+ service = str.upper(kwargs['service'])
+ amazon_response = (item for item in amazon_response if item['service'] == service)
+
+ return [item['ip_prefix'] for item in amazon_response]
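+
+# Editorial sketch of an ip-ranges.json prefix entry being filtered above
+# (hypothetical values):
+#
+#     {'ip_prefix': '52.95.245.0/24', 'region': 'us-east-1', 'service': 'EC2'}
+#
+# so the region/service options reduce to a simple equality test per entry.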
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
new file mode 100644
index 00000000..fe2798e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
@@ -0,0 +1,234 @@
+# (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
+# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
+# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: aws_ssm
+author:
+ - Bill Wang <ozbillwang(at)gmail.com>
+ - Marat Bakeev <hawara(at)gmail.com>
+ - Michael De La Rue <siblemitcom.mddlr@spamgourmet.com>
+requirements:
+ - boto3
+ - botocore
+short_description: Get the value for an SSM parameter or all parameters under a path.
+description:
+ - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
+ The first argument you pass the lookup can either be a parameter name or a hierarchy of
+ parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
+ 5 layers may be specified.
+  - If looking up an explicitly listed parameter by name which does not exist then the lookup
+    will return a None value, which will be interpreted by Jinja2 as an empty string. You can
+    use the C(default) filter to give a default value in this case, but must set its second
+    parameter to true (see the examples below).
+  - When looking up a path for parameters under it, a dictionary will be returned for each path.
+    If there is no parameter under that path then the return will be successful but the
+    dictionary will be empty.
+  - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm
+    plugin will generate an error, normally crashing the current ansible task. This is normally
+    the right thing, since ignoring a value that IAM isn't giving access to could cause bigger
+    problems, wrong behaviour or loss of data. If you want to continue in this case then you will
+    have to set up two ansible tasks, one which sets a variable and ignores failures, and one
+    which uses the value of that variable with a default. See the examples below.
+
+options:
+ decrypt:
+ description: A boolean to indicate whether to decrypt the parameter.
+ default: true
+ type: boolean
+ bypath:
+ description: A boolean to indicate whether the parameter is provided as a hierarchy.
+ default: false
+ type: boolean
+ recursive:
+ description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
+ default: false
+ type: boolean
+ shortnames:
+ description: Indicates whether to return the name only without path if using a parameter hierarchy.
+ default: false
+ type: boolean
+'''
+
+EXAMPLES = '''
+# lookup sample:
+- name: lookup ssm parameter store in the current region
+ debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
+
+- name: lookup ssm parameter store in nominated region
+ debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
+
+- name: lookup ssm parameter store without decryption
+ debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
+
+- name: lookup ssm parameter store in nominated aws profile
+ debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
+
+- name: lookup ssm parameter store using explicit aws credentials
+ debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
+
+- name: lookup ssm parameter store with all options.
+ debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
+
+- name: lookup a key which doesn't exist, returns ""
+ debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
+
+- name: lookup a key which doesn't exist, returning a default ('root')
+ debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
+
+- name: lookup a key which doesn't exist failing to store it in a fact
+ set_fact:
+ temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
+ ignore_errors: true
+
+- name: show fact default to "access failed" if we don't have access
+ debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
+
+- name: return a dictionary of ssm parameters from a hierarchy path
+ debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
+
+- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
+ debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
+
+- name: Iterate over a parameter hierarchy (one iteration per parameter)
+ debug: msg='Key contains {{ item.key }} , with value {{ item.value }}'
+ loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}'
+
+- name: Iterate over multiple paths as dictionaries (one iteration per path)
+ debug: msg='Path contains {{ item }}'
+ loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}'
+
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ import botocore
+ import boto3
+except ImportError:
+ pass # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+from ..module_utils.ec2 import HAS_BOTO3
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+display = Display()
+
+
+def _boto3_conn(region, credentials):
+ if 'boto_profile' in credentials:
+ boto_profile = credentials.pop('boto_profile')
+ else:
+ boto_profile = None
+
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ if boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region)
+ # FIXME: we should probably do better passing on of the error information
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
+ raise AnsibleError("Insufficient credentials found.")
+ else:
+ raise AnsibleError("Insufficient credentials found.")
+ return connection
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
+ aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
+ bypath=False, shortnames=False, recursive=False, decrypt=True):
+ '''
+ :arg terms: a list of lookups to run.
+ e.g. ['parameter_name', 'parameter_name_too' ]
+ :kwarg variables: ansible variables active at the time of the lookup
+            :kwarg aws_access_key: AWS access key ID to use
+            :kwarg aws_secret_key: AWS secret access key (matching the access key)
+            :kwarg aws_security_token: AWS session token if using STS
+ :kwarg decrypt: Set to True to get decrypted parameters
+ :kwarg region: AWS region in which to do the lookup
+ :kwarg bypath: Set to True to do a lookup of variables under a path
+ :kwarg recursive: Set to True to recurse below the path (requires bypath=True)
+ :returns: A list of parameter values or a list of dictionaries if bypath=True.
+ '''
+
+ if not HAS_BOTO3:
+ raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')
+
+ ret = []
+ response = {}
+ ssm_dict = {}
+
+ credentials = {}
+ if aws_profile:
+ credentials['boto_profile'] = aws_profile
+ else:
+ credentials['boto_profile'] = boto_profile
+ credentials['aws_secret_access_key'] = aws_secret_key
+ credentials['aws_access_key_id'] = aws_access_key
+ credentials['aws_session_token'] = aws_security_token
+
+ client = _boto3_conn(region, credentials)
+
+ ssm_dict['WithDecryption'] = decrypt
+
+ # Lookup by path
+ if bypath:
+ ssm_dict['Recursive'] = recursive
+ for term in terms:
+ ssm_dict["Path"] = term
+ display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
+ try:
+ response = client.get_parameters_by_path(**ssm_dict)
+ except ClientError as e:
+ raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
+ paramlist = list()
+ paramlist.extend(response['Parameters'])
+
+ # Manual pagination, since boto doesn't support it yet for get_parameters_by_path
+ while 'NextToken' in response:
+ response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
+ paramlist.extend(response['Parameters'])
+
+ # shorten parameter names. yes, this will return duplicate names with different values.
+ if shortnames:
+ for x in paramlist:
+ x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
+
+ display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
+ if len(paramlist):
+ ret.append(boto3_tag_list_to_ansible_dict(paramlist,
+ tag_name_key_name="Name",
+ tag_value_key_name="Value"))
+ else:
+ ret.append({})
+ # Lookup by parameter name - always returns a list with one or no entry.
+ else:
+ display.vvv("AWS_ssm name lookup term: %s" % terms)
+ ssm_dict["Names"] = terms
+ try:
+ response = client.get_parameters(**ssm_dict)
+ except ClientError as e:
+ raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
+ params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
+ tag_value_key_name="Value")
+ for i in terms:
+ if i.split(':', 1)[0] in params:
+ ret.append(params[i])
+ elif i in response['InvalidParameters']:
+ ret.append(None)
+ else:
+ raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
+ return ret
+
+ display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
+ return ret
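+
+# Editorial sketch of the name-lookup mapping above: for terms
+# ['Hello', 'NoKey'], a get_parameters() response of (hypothetical values)
+#
+#     {'Parameters': [{'Name': 'Hello', 'Value': 'World'}],
+#      'InvalidParameters': ['NoKey']}
+#
+# produces ret == ['World', None].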
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/acm.py
new file mode 100644
index 00000000..0fc86516
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
+# on behalf of Telstra Corporation Limited
+#
+# Common functionality to be used by the modules:
+# - acm
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Common Amazon Certificate Manager facts shared between modules
+"""
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
+class ACMServiceManager(object):
+ """Handles ACM Facts Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('acm')
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
+ def delete_certificate_with_backoff(self, client, arn):
+ client.delete_certificate(CertificateArn=arn)
+
+ def delete_certificate(self, client, module, arn):
+ module.debug("Attempting to delete certificate %s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
+ module.debug("Successfully deleted certificate %s" % arn)
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
+ def list_certificates_with_backoff(self, client, statuses=None):
+ paginator = client.get_paginator('list_certificates')
+ kwargs = dict()
+ if statuses:
+ kwargs['CertificateStatuses'] = statuses
+ return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
+ def get_certificate_with_backoff(self, client, certificate_arn):
+ response = client.get_certificate(CertificateArn=certificate_arn)
+ # strip out response metadata
+ return {'Certificate': response['Certificate'],
+ 'CertificateChain': response['CertificateChain']}
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
+ def describe_certificate_with_backoff(self, client, certificate_arn):
+ return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
+ def list_certificate_tags_with_backoff(self, client, certificate_arn):
+ return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
+
+ # Returns a list of certificates
+ # if domain_name is specified, returns only certificates with that domain
+ # if an ARN is specified, returns only that certificate
+ # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
+ # only certificates which contain all those tags (key exists, value matches).
+ def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
+ try:
+ all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificates")
+ if domain_name:
+ certificates = [cert for cert in all_certificates
+ if cert['DomainName'] == domain_name]
+ else:
+ certificates = all_certificates
+
+ if arn:
+ # still return a list, not just one item
+ certificates = [c for c in certificates if c['CertificateArn'] == arn]
+
+ results = []
+ for certificate in certificates:
+ try:
+ cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
+
+ # in some states, ACM resources do not have a corresponding cert
+ if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
+ try:
+ cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
+ except (BotoCoreError, ClientError, KeyError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
+ cert_data = camel_dict_to_snake_dict(cert_data)
+ try:
+ tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
+
+ cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ results.append(cert_data)
+
+ if only_tags:
+ for tag_key in only_tags:
+ try:
+ results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
+ except (TypeError, AttributeError) as e:
+ for c in results:
+ if 'tags' not in c:
+ module.debug("cert is %s" % str(c))
+ module.fail_json(msg="ACM tag filtering err", exception=e)
+
+ return results
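+
+    # Usage sketch (editorial; names are illustrative): filter the returned
+    # certificates down to those carrying an exact tag key/value pair:
+    #
+    #     certs = ACMServiceManager(module).get_certificates(
+    #         client, module,
+    #         domain_name='example.com',
+    #         only_tags={'Environment': 'staging'})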
+
+ # returns the domain name of a certificate (encoded in the public cert)
+ # for a given ARN
+ # A cert with that ARN must already exist
+ def get_domain_of_cert(self, client, module, arn):
+ if arn is None:
+            module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
+ try:
+ cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
+ return cert_data['DomainName']
+
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+    def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
+        # Build the request parameters dynamically: CertificateChain and
+        # CertificateArn must be omitted entirely (not passed as None)
+        # when they are not supplied.
+        params = dict(Certificate=to_bytes(certificate),
+                      PrivateKey=to_bytes(private_key))
+        if certificate_chain:
+            params['CertificateChain'] = to_bytes(certificate_chain)
+        if arn:
+            params['CertificateArn'] = arn
+        ret = client.import_certificate(**params)
+        return ret['CertificateArn']
+
+ # Tags are a normal Ansible style dict
+ # {'Key':'Value'}
+ @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
+ def tag_certificate_with_backoff(self, client, arn, tags):
+ aws_tags = ansible_dict_to_boto3_tag_list(tags)
+ client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
+
+ def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
+
+ original_arn = arn
+
+ # upload cert
+ try:
+ arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't upload new certificate")
+
+ if original_arn and (arn != original_arn):
+            # I'm not sure whether the API guarantees that the ARN will not change
+ # I'm failing just in case.
+ # If I'm wrong, I'll catch it in the integration tests.
+ module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
+
+ # tag that cert
+ try:
+ self.tag_certificate_with_backoff(client, arn, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except Exception as f:
+                module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run." % arn)
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
+
+ return arn
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/batch.py
new file mode 100644
index 00000000..648d0a38
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/batch.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Batch modules.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from .ec2 import boto3_conn
+from .ec2 import get_aws_connection_info
+
+
+class AWSConnection(object):
+ """
+ Create the connection object and client objects as required.
+ """
+
+ def __init__(self, ansible_obj, resources, boto3=True):
+
+ ansible_obj.deprecate("The 'AWSConnection' class is deprecated, please use 'AnsibleAWSModule.client()'",
+ date='2022-06-01', collection_name='amazon.aws')
+
+ self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
+
+ self.resource_client = dict()
+ if not resources:
+ resources = ['batch']
+
+ resources.append('iam')
+
+ for resource in resources:
+ aws_connect_kwargs.update(dict(region=self.region,
+ endpoint=self.endpoint,
+ conn_type='client',
+ resource=resource
+ ))
+ self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+ # if region is not provided, then get default profile/session region
+ if not self.region:
+ self.region = self.resource_client['batch'].meta.region_name
+
+ # set account ID
+ try:
+ self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+ except (ClientError, ValueError, KeyError, IndexError):
+ self.account_id = ''
+
+ def client(self, resource='batch'):
+ return self.resource_client[resource]
+
+
+def cc(key):
+ """
+    Changes a python snake_case key into its camelCase equivalent. For example, 'compute_environment_name' becomes
+    'computeEnvironmentName'.
+
+ :param key:
+ :return:
+ """
+ components = key.split('_')
+ return components[0] + "".join([token.capitalize() for token in components[1:]])
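+
+# Editorial examples:
+#
+#     cc('compute_environment_name')  # -> 'computeEnvironmentName'
+#     cc('state')                     # -> 'state'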
+
+
+def set_api_params(module, module_params):
+ """
+ Sets module parameters to those expected by the boto3 API.
+ :param module:
+ :param module_params:
+ :return:
+ """
+ api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+ return snake_dict_to_camel_dict(api_params)
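+
+# Editorial sketch (hypothetical values): with
+# module.params == {'compute_environment_name': 'ce1', 'state': 'present'}
+# and module_params == ('compute_environment_name',), set_api_params()
+# returns {'computeEnvironmentName': 'ce1'}.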
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..8d5eeba3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
@@ -0,0 +1,220 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+    - Example using the AWSRetry class which inherits from CloudRetry:
+
+      @AWSRetry.exponential_backoff(retries=10, delay=3)
+      def get_ec2_security_group_ids_from_names():
+          ...
+
+      @AWSRetry.jittered_backoff()
+      def get_ec2_security_group_ids_from_names():
+          ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+ allowing developers to explicitly pass in the a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+            response_code (str): This is the Response Code that is being matched against.
+            catch_extra_error_codes (list or None): Optional additional error codes to treat as retryable.
+        """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+                            else:
+                                # The error code is not one we retry on; re-raise the original exception
+                                raise e
+                        else:
+                            # Not the provider's base exception class (e.g. ClientError for AWS); re-raise
+                            raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
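+
+    # A minimal subclass sketch (editorial; the real AWSRetry lives in
+    # module_utils/ec2.py), showing the three hooks _backoff relies on:
+    #
+    #     class AWSRetry(CloudRetry):
+    #         base_class = botocore.exceptions.ClientError
+    #
+    #         @staticmethod
+    #         def status_code_from_exception(error):
+    #             return error.response['Error']['Code']
+    #
+    #         @staticmethod
+    #         def found(response_code, catch_extra_error_codes=None):
+    #             retry_on = ['RequestLimitExceeded', 'Throttling']
+    #             if catch_extra_error_codes:
+    #                 retry_on.extend(catch_extra_error_codes)
+    #             return response_code in retry_on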
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+                default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
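+
+
+# A minimal usage sketch (illustrative only; the real in-tree subclass is
+# AWSRetry in module_utils/ec2.py): a provider fills in base_class and the
+# two static hooks, then decorates its calls.
+#
+#     class ExampleRetry(CloudRetry):
+#         base_class = botocore.exceptions.ClientError
+#
+#         @staticmethod
+#         def status_code_from_exception(error):
+#             return error.response['Error']['Code']
+#
+#         @staticmethod
+#         def found(response_code, catch_extra_error_codes=None):
+#             retry_on = ['Throttling', 'RequestLimitExceeded']
+#             if catch_extra_error_codes:
+#                 retry_on.extend(catch_extra_error_codes)
+#             return response_code in retry_on
+#
+#     @ExampleRetry.jittered_backoff(retries=5, delay=2)
+#     def describe_instances(client):
+#         return client.describe_instances()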
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
new file mode 100644
index 00000000..994b84da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Willem van Ketwich
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Willem van Ketwich <willem@vanketwich.com.au>
+#
+# Common functionality to be used by the modules:
+# - cloudfront_distribution
+# - cloudfront_invalidation
+# - cloudfront_origin_access_identity
+"""
+Common CloudFront facts shared between modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
+class CloudFrontFactsServiceManager(object):
+ """Handles CloudFront Facts Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+
+ def get_distribution(self, distribution_id):
+ try:
+ return self.client.get_distribution(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution")
+
+ def get_distribution_config(self, distribution_id):
+ try:
+ return self.client.get_distribution_config(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing distribution configuration")
+
+ def get_origin_access_identity(self, origin_access_identity_id):
+ try:
+ return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity")
+
+ def get_origin_access_identity_config(self, origin_access_identity_id):
+ try:
+ return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
+
+ def get_invalidation(self, distribution_id, invalidation_id):
+ try:
+ return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing invalidation")
+
+ def get_streaming_distribution(self, distribution_id):
+ try:
+ return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ def get_streaming_distribution_config(self, distribution_id):
+ try:
+ return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error describing streaming distribution")
+
+ def list_origin_access_identities(self):
+ try:
+ paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
+ result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
+ return result.get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
+
+ def list_distributions(self, keyed=True):
+ try:
+ paginator = self.client.get_paginator('list_distributions')
+ result = paginator.paginate().build_full_result().get('DistributionList', {})
+ distribution_list = result.get('Items', [])
+ if not keyed:
+ return distribution_list
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions")
+
+ def list_distributions_by_web_acl_id(self, web_acl_id):
+ try:
+ result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id, aws_retry=True)
+ distribution_list = result.get('DistributionList', {}).get('Items', [])
+ return self.keyed_list_helper(distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
+
+ def list_invalidations(self, distribution_id):
+ try:
+ paginator = self.client.get_paginator('list_invalidations')
+ result = paginator.paginate(DistributionId=distribution_id).build_full_result()
+ return result.get('InvalidationList', {}).get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing invalidations")
+
+ def list_streaming_distributions(self, keyed=True):
+ try:
+ paginator = self.client.get_paginator('list_streaming_distributions')
+ result = paginator.paginate().build_full_result()
+ streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
+ if not keyed:
+ return streaming_distribution_list
+ return self.keyed_list_helper(streaming_distribution_list)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error listing streaming distributions")
+
+ def summary(self):
+ summary_dict = {}
+ summary_dict.update(self.summary_get_distribution_list(False))
+ summary_dict.update(self.summary_get_distribution_list(True))
+ summary_dict.update(self.summary_get_origin_access_identity_list())
+ return summary_dict
+
+ def summary_get_origin_access_identity_list(self):
+ try:
+ origin_access_identity_list = {'origin_access_identities': []}
+ origin_access_identities = self.list_origin_access_identities()
+ for origin_access_identity in origin_access_identities:
+ oai_id = origin_access_identity['Id']
+ oai_full_response = self.get_origin_access_identity(oai_id)
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
+ return origin_access_identity_list
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
+
+ def summary_get_distribution_list(self, streaming=False):
+ try:
+ list_name = 'streaming_distributions' if streaming else 'distributions'
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
+ distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ for dist in distributions:
+ temp_distribution = {}
+ for key_name in key_list:
+ temp_distribution[key_name] = dist[key_name]
+ temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
+ temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ if not streaming:
+ temp_distribution['WebACLId'] = dist['WebACLId']
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ if invalidation_ids:
+ temp_distribution['Invalidations'] = invalidation_ids
+ resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True)
+ temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ distribution_list[list_name].append(temp_distribution)
+ return distribution_list
+ # ClientError is an Exception subclass, so a single handler suffices
+ except Exception as e:
+ self.module.fail_json_aws(e, msg="Error generating summary of distributions")
+
+ def get_etag_from_distribution_id(self, distribution_id, streaming):
+ distribution = {}
+ if not streaming:
+ distribution = self.get_distribution(distribution_id)
+ else:
+ distribution = self.get_streaming_distribution(distribution_id)
+ return distribution['ETag']
+
+ def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
+ try:
+ invalidation_ids = []
+ invalidations = self.list_invalidations(distribution_id)
+ for invalidation in invalidations:
+ invalidation_ids.append(invalidation['Id'])
+ return invalidation_ids
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
+
+ def get_distribution_id_from_domain_name(self, domain_name):
+ try:
+ distribution_id = ""
+ distributions = self.list_distributions(False)
+ distributions += self.list_streaming_distributions(False)
+ for dist in distributions:
+ if 'Items' in dist['Aliases']:
+ for alias in dist['Aliases']['Items']:
+ if str(alias).lower() == domain_name.lower():
+ distribution_id = dist['Id']
+ break
+ return distribution_id
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
+
+ def get_aliases_from_distribution_id(self, distribution_id):
+ try:
+ distribution = self.get_distribution(distribution_id)
+ return distribution['DistributionConfig']['Aliases'].get('Items', [])
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
+
+ def keyed_list_helper(self, list_to_key):
+ keyed_list = dict()
+ for item in list_to_key:
+ distribution_id = item['Id']
+ if 'Items' in item['Aliases']:
+ aliases = item['Aliases']['Items']
+ for alias in aliases:
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
+ return keyed_list
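+
+
+# Consumption sketch (hypothetical module code, not part of this file): the
+# manager expects an AnsibleAWSModule (see module_utils/core.py below) so
+# that module.client() and module.fail_json_aws() are available.
+#
+#     module = AnsibleAWSModule(argument_spec=dict(distribution_id=dict()),
+#                               supports_check_mode=True)
+#     facts = CloudFrontFactsServiceManager(module)
+#     result = {'summary': facts.summary()}
+#     if module.params.get('distribution_id'):
+#         result['aliases'] = facts.get_aliases_from_distribution_id(module.params['distribution_id'])
+#     module.exit_json(changed=False, **result)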
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/core.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/core.py
new file mode 100644
index 00000000..349ec3b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/core.py
@@ -0,0 +1,381 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+ module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+ mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make AWSRetry easier to use, it can be wrapped around any call from a
+module-created client. To add retries to a client, create the client with
+a retry decorator:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import logging
+import traceback
+from functools import wraps
+from distutils.version import LooseVersion
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils._text import to_native
+
+from .ec2 import HAS_BOTO3
+from .ec2 import boto3_conn
+from .ec2 import ec2_argument_spec
+from .ec2 import get_aws_connection_info
+from .ec2 import get_aws_region
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message')
+
+
+class AnsibleAWSModule(object):
+ """An ansible module class for AWS modules
+
+ AnsibleAWSModule provides a base class for building modules which
+ connect to Amazon Web Services. The interface is currently more
+ restricted than the basic module class with the aim that later the
+ basic module class can be reduced. If you find that any key
+ feature is missing please contact the author/Ansible AWS team
+ (available on #ansible-aws on IRC) to request the additional
+ features needed.
+ """
+ default_settings = {
+ "default_args": True,
+ "check_boto3": True,
+ "auto_retry": True,
+ "module_class": AnsibleModule
+ }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+ # ec2_argument_spec contains the region, so we use that; there's a patch
+ # coming which will add it to aws_argument_spec, and if that's accepted
+ # we should switch over later.
+ argument_spec_full = ec2_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"] and not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore or boto3'))
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
+ @property
+ def params(self):
+ return self._module.params
+
+ def _get_resource_action_list(self):
+ actions = []
+ for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ ln = ln.strip()
+ if not ln:
+ continue
+ found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
+ if found_operational_request:
+ operation_request = found_operational_request.group(0)[20:-1]
+ resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
+ actions.append("{0}:{1}".format(resource, operation_request))
+ return list(set(actions))
+
+ def exit_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.exit_json(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.fail_json(*args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ return self._module.debug(*args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ return self._module.warn(*args, **kwargs)
+
+ def deprecate(self, *args, **kwargs):
+ return self._module.deprecate(*args, **kwargs)
+
+ def boolean(self, *args, **kwargs):
+ return self._module.boolean(*args, **kwargs)
+
+ def md5(self, *args, **kwargs):
+ return self._module.md5(*args, **kwargs)
+
+ def client(self, service, retry_decorator=None):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ conn = boto3_conn(self, conn_type='client', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
+ def resource(self, service):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ return boto3_conn(self, conn_type='resource', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+
+ @property
+ def region(self, boto3=True):
+ return get_aws_region(self, boto3)
+
+ def fail_json_aws(self, exception, msg=None, **kwargs):
+ """call fail_json with processed exception
+
+ function for converting exceptions thrown by AWS SDK modules,
+ botocore, boto3 and boto, into nice error messages.
+ """
+ last_traceback = traceback.format_exc()
+
+ # to_native is trusted to handle exceptions that str() could
+ # convert to text.
+ try:
+ except_msg = to_native(exception.message)
+ except AttributeError:
+ except_msg = to_native(exception)
+
+ if msg is not None:
+ message = '{0}: {1}'.format(msg, except_msg)
+ else:
+ message = except_msg
+
+ try:
+ response = exception.response
+ except AttributeError:
+ response = None
+
+ failure = dict(
+ msg=message,
+ exception=last_traceback,
+ **self._gather_versions()
+ )
+
+ failure.update(kwargs)
+
+ if response is not None:
+ failure.update(**camel_dict_to_snake_dict(response))
+
+ self.fail_json(**failure)
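+
+ # Typical call-site pattern (sketch): catch botocore errors where the API
+ # call is made and hand them to fail_json_aws with a short message:
+ #
+ #     try:
+ #         ec2.describe_instances(aws_retry=True)
+ #     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ #         module.fail_json_aws(e, msg="Failed to describe instances")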
+
+ def _gather_versions(self):
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+ Returns {'boto3_version': str, 'botocore_version': str}
+ Returns {} if neither are installed
+ """
+ if not HAS_BOTO3:
+ return {}
+ import boto3
+ import botocore
+ return dict(boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__)
+
+ def boto3_at_least(self, desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
+ def botocore_at_least(self, desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
+class _RetryingBotoClientWrapper(object):
+ __never_wait = (
+ 'get_paginator', 'can_paginate',
+ 'get_waiter', 'generate_presigned_url',
+ )
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(aws_retry=False, *args, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
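+
+# Behaviour note (sketch): __getattr__ above wraps a client method on first
+# access and caches the wrapper on the instance via setattr, so later lookups
+# bypass __getattr__. Retrying stays opt-in at call time:
+#
+#     ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+#     ec2.describe_instances(aws_retry=True)   # retried on throttling codes
+#     ec2.describe_instances()                 # single attempt (the default)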
+
+
+def is_boto3_error_code(code, e=None):
+ """Check if the botocore exception is raised by a specific error code.
+
+ Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+ Example:
+ try:
+ ec2.describe_instances(InstanceIds=['potato'])
+ except is_boto3_error_code('InvalidInstanceID.Malformed'):
+ # handle the error for that code case
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if not isinstance(code, list):
+ code = [code]
+ if isinstance(e, ClientError) and e.response['Error']['Code'] in code:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def is_boto3_error_message(msg, e=None):
+ """Check if the botocore exception contains a specific error message.
+
+ Returns ClientError if the error message matches, a dummy exception if it does not have an error message or does not match
+
+ Example:
+ try:
+ ec2.describe_vpc_classic_link(VpcIds=[vpc_id])
+ except is_boto3_error_message('The functionality you requested is not available in this region.'):
+ # handle the error for that error message
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
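+
+# Combined sketch: the helpers are evaluated inside the except clause, so
+# they can be chained ahead of a generic ClientError handler (the instance
+# id and message fragment here are illustrative):
+#
+#     try:
+#         ec2.describe_instances(InstanceIds=['i-0123456789abcdef0'])
+#     except is_boto3_error_code('InvalidInstanceID.NotFound'):
+#         pass  # already gone; nothing to do
+#     except is_boto3_error_message('not available in this region'):
+#         module.fail_json(msg="Feature is unsupported in this region")
+#     except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
+#         module.fail_json_aws(e, msg="Unexpected AWS failure")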
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
+ op = client.meta.method_to_api_mapping.get(method_name)
+ input_shape = client._service_model.operation_model(op).input_shape
+ if not input_shape:
+ parameters = []
+ elif required:
+ parameters = list(input_shape.required_members)
+ else:
+ parameters = list(input_shape.members.keys())
+ return parameters
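+
+# Usage sketch: introspect the botocore service model for a method's
+# parameters (exact names depend on the installed botocore version):
+#
+#     get_boto3_client_method_parameters(dynamodb_client, 'create_table', required=True)
+#     # e.g. ['AttributeDefinitions', 'TableName', 'KeySchema']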
+
+
+def scrub_none_parameters(parameters):
+ """
+ Iterate over a dictionary removing any keys that have a None value
+
+ Reference: https://github.com/ansible-collections/community.aws/issues/251
+ Credit: https://medium.com/better-programming/how-to-remove-null-none-values-from-a-dictionary-in-python-1bedf1aab5e4
+
+ :param parameters: parameter dict
+ :return: parameter dict with all keys = None removed
+ """
+
+ clean_parameters = {}
+
+ for k, v in parameters.items():
+ if isinstance(v, dict):
+ clean_parameters[k] = scrub_none_parameters(v)
+ elif v is not None:
+ clean_parameters[k] = v
+
+ return clean_parameters
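+
+
+# Behaviour sketch: recursion only descends into dicts, and falsy non-None
+# values such as 0, '' and [] are preserved:
+#
+#     >>> scrub_none_parameters({'a': None, 'b': {'c': None, 'd': 0}, 'e': ''})
+#     {'b': {'d': 0}, 'e': ''}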
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
new file mode 100644
index 00000000..abcbcfd2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Direct Connect modules.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+
+
+class DirectConnectError(Exception):
+ def __init__(self, msg, last_traceback=None, exception=None):
+ self.msg = msg
+ self.last_traceback = last_traceback
+ self.exception = exception
+
+
+def delete_connection(client, connection_id):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def associate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
+ " with link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def disassociate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
+ " from link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def delete_virtual_interface(client, virtual_interface):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
+ last_traceback=traceback.format_exc(),
+ exception=e)
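+
+
+# Caller sketch: these helpers raise DirectConnectError rather than failing
+# the module themselves, so callers translate (the connection id here is
+# illustrative):
+#
+#     try:
+#         delete_connection(client, connection_id='dxcon-xxxxxxxx')
+#     except DirectConnectError as e:
+#         module.fail_json(msg=e.msg, exception=e.last_traceback)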
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
new file mode 100644
index 00000000..e2278992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -0,0 +1,807 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six import binary_type
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import text_type
+from ansible.module_utils.six import integer_types
+# Used to live here, moved into ansible.module_utils.common.dict_transformations
+from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import
+
+from .cloud import CloudRetry
+
+BOTO_IMP_ERR = None
+try:
+ import boto
+ import boto.ec2 # boto does weird import stuff
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+try:
+ # Although this is primarily to let Python 3 use the custom comparison as a key, Python 2.7 also
+ # uses this (and it works as expected). Python 2.6 will trigger the ImportError.
+ from functools import cmp_to_key
+ PY3_COMPARISON = True
+except ImportError:
+ PY3_COMPARISON = False
+
+
+class AnsibleAWSError(Exception):
+ pass
+
+
+def _botocore_exception_maybe():
+ """
+ Allow for boto3 not being installed when using these utils by wrapping
+ botocore.exceptions instead of assigning from it directly.
+ """
+ if HAS_BOTO3:
+ return botocore.exceptions.ClientError
+ return type(None)
+
+
+class AWSRetry(CloudRetry):
+ base_class = _botocore_exception_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.response['Error']['Code']
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This list of failures is based on this API Reference
+ # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+ #
+ # TooManyRequestsException comes from inside botocore when it
+ # does retries; unfortunately it does not retry for long
+ # enough to allow some services such as API Gateway to
+ # complete configuration. At the moment of writing there is a
+ # botocore/boto3 bug open to fix this.
+ #
+ # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+ retry_on = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling'
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
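+
+# Usage sketch: AWSRetry inherits the decorator factories from CloudRetry,
+# so calls can be retried on the codes above plus service-specific extras:
+#
+#     @AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidInstanceID.NotFound'])
+#     def describe_instances(client, **kwargs):
+#         return client.describe_instances(**kwargs)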
+
+
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ try:
+ return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
+ except ValueError as e:
+ module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
+ module.fail_json(msg=to_native(e))
+ except botocore.exceptions.NoRegionError as e:
+ module.fail_json(msg="The %s module requires a region and none was found in configuration, "
+ "environment variables or module parameters" % module._name)
+
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+ raise ValueError('There is an issue in the calling code. You '
+ 'must specify either both, resource, or client for '
+ 'the conn_type parameter in the boto3_conn function '
+ 'call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
+boto3_inventory_conn = _boto3_conn
+
+
+def boto_exception(err):
+ """
+ Extracts the error message from a boto exception.
+
+ :param err: Exception from boto
+ :return: Error message
+ """
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def aws_common_argument_spec():
+ return dict(
+ debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
+ ec2_url=dict(aliases=['aws_endpoint_url', 'endpoint_url']),
+ aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
+ aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
+ security_token=dict(aliases=['access_token', 'aws_security_token'], no_log=True),
+ validate_certs=dict(default=True, type='bool'),
+ aws_ca_bundle=dict(type='path'),
+ profile=dict(aliases=['aws_profile']),
+ aws_config=dict(type='dict'),
+ )
+
+
+def ec2_argument_spec():
+ spec = aws_common_argument_spec()
+ spec.update(
+ dict(
+ region=dict(aliases=['aws_region', 'ec2_region']),
+ )
+ )
+ return spec
+
+
+def get_aws_region(module, boto3=False):
+ region = module.params.get('region')
+
+ if region:
+ return region
+
+ if 'AWS_REGION' in os.environ:
+ return os.environ['AWS_REGION']
+ if 'AWS_DEFAULT_REGION' in os.environ:
+ return os.environ['AWS_DEFAULT_REGION']
+ if 'EC2_REGION' in os.environ:
+ return os.environ['EC2_REGION']
+
+ if not boto3:
+ if not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
+ # boto.config.get returns None if config not found
+ region = boto.config.get('Boto', 'aws_region')
+ if region:
+ return region
+ return boto.config.get('Boto', 'ec2_region')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+
+ # No additional API call is needed here; botocore defaults to 'us-east-1' if the lookup below returns None.
+ try:
+ profile_name = module.params.get('profile')
+ return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ except botocore.exceptions.ProfileNotFound as e:
+ return None
+
+
+def get_aws_connection_info(module, boto3=False):
+
+ # Check module args for credentials, then check environment vars
+ # access_key
+
+ ec2_url = module.params.get('ec2_url')
+ access_key = module.params.get('aws_access_key')
+ secret_key = module.params.get('aws_secret_key')
+ security_token = module.params.get('security_token')
+ region = get_aws_region(module, boto3)
+ profile_name = module.params.get('profile')
+ validate_certs = module.params.get('validate_certs')
+ ca_bundle = module.params.get('aws_ca_bundle')
+ config = module.params.get('aws_config')
+
+ # Only read the profile environment variables if we've *not* been passed
+ # any credentials as parameters.
+ if not profile_name and not access_key and not secret_key:
+ if os.environ.get('AWS_PROFILE'):
+ profile_name = os.environ.get('AWS_PROFILE')
+ if os.environ.get('AWS_DEFAULT_PROFILE'):
+ profile_name = os.environ.get('AWS_DEFAULT_PROFILE')
+
+ if profile_name and (access_key or secret_key or security_token):
+ module.deprecate("Passing both a profile and access tokens has been deprecated."
+ " Only the profile will be used."
+ " In later versions of Ansible the options will be mutually exclusive",
+ date='2022-06-01', collection_name='amazon.aws')
+
+ if not ec2_url:
+ if 'AWS_URL' in os.environ:
+ ec2_url = os.environ['AWS_URL']
+ elif 'EC2_URL' in os.environ:
+ ec2_url = os.environ['EC2_URL']
+
+ if not access_key:
+ if os.environ.get('AWS_ACCESS_KEY_ID'):
+ access_key = os.environ['AWS_ACCESS_KEY_ID']
+ elif os.environ.get('AWS_ACCESS_KEY'):
+ access_key = os.environ['AWS_ACCESS_KEY']
+ elif os.environ.get('EC2_ACCESS_KEY'):
+ access_key = os.environ['EC2_ACCESS_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
+ access_key = boto.config.get('Credentials', 'aws_access_key_id')
+ elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
+ access_key = boto.config.get('default', 'aws_access_key_id')
+ else:
+ # in case access_key came in as empty string
+ access_key = None
+
+ if not secret_key:
+ if os.environ.get('AWS_SECRET_ACCESS_KEY'):
+ secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
+ elif os.environ.get('AWS_SECRET_KEY'):
+ secret_key = os.environ['AWS_SECRET_KEY']
+ elif os.environ.get('EC2_SECRET_KEY'):
+ secret_key = os.environ['EC2_SECRET_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
+ secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
+ elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
+ secret_key = boto.config.get('default', 'aws_secret_access_key')
+ else:
+ # in case secret_key came in as empty string
+ secret_key = None
+
+ if not security_token:
+ if os.environ.get('AWS_SECURITY_TOKEN'):
+ security_token = os.environ['AWS_SECURITY_TOKEN']
+ elif os.environ.get('AWS_SESSION_TOKEN'):
+ security_token = os.environ['AWS_SESSION_TOKEN']
+ elif os.environ.get('EC2_SECURITY_TOKEN'):
+ security_token = os.environ['EC2_SECURITY_TOKEN']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
+ security_token = boto.config.get('Credentials', 'aws_security_token')
+ elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
+ security_token = boto.config.get('default', 'aws_security_token')
+ else:
+ # in case security_token came in as empty string
+ security_token = None
+
+ if not ca_bundle:
+ if os.environ.get('AWS_CA_BUNDLE'):
+ ca_bundle = os.environ.get('AWS_CA_BUNDLE')
+
+ if HAS_BOTO3 and boto3:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=security_token)
+
+ if profile_name:
+ boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
+ boto_params['profile_name'] = profile_name
+
+ if validate_certs and ca_bundle:
+ boto_params['verify'] = ca_bundle
+ else:
+ boto_params['verify'] = validate_certs
+
+ else:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ security_token=security_token)
+
+ # only set profile_name if passed as an argument
+ if profile_name:
+ boto_params['profile_name'] = profile_name
+
+ boto_params['validate_certs'] = validate_certs
+
+ if config is not None:
+ if HAS_BOTO3 and boto3:
+ boto_params['aws_config'] = botocore.config.Config(**config)
+ elif HAS_BOTO and not boto3:
+ if 'user_agent' in config:
+ sys.modules["boto.connection"].UserAgent = config['user_agent']
+
+ for param, value in boto_params.items():
+ if isinstance(value, binary_type):
+ boto_params[param] = text_type(value, 'utf-8', 'strict')
+
+ return region, ec2_url, boto_params
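+
+# Typical consumer (sketch), mirroring AnsibleAWSModule.client() in core.py:
+#
+#     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+#     conn = boto3_conn(module, conn_type='client', resource='ec2',
+#                       region=region, endpoint=ec2_url, **aws_connect_kwargs)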
+
+
+def get_ec2_creds(module):
+ ''' for compatibility with old modules that don't/can't yet
+ use the ec2_connect method '''
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
+
+
+def boto_fix_security_token_in_profile(conn, profile_name):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + profile_name
+ if boto.config.has_option(profile, 'aws_security_token'):
+ conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
+ return conn
+
+
+def connect_to_aws(aws_module, region, **params):
+ try:
+ conn = aws_module.connect_to_region(region, **params)
+ except boto.provider.ProfileNotFoundError:
+ raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
+ if not conn:
+ if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+ "boto or extend with endpoints_path" % (region, aws_module.__name__))
+ else:
+ raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
+ if params.get('profile_name'):
+ conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
+ return conn
+
+
+def ec2_connect(module):
+
+ """ Return an ec2 connection"""
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+
+ # If ec2_url is present use it
+ if ec2_url:
+ try:
+ ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ # Otherwise, if we have a region specified, connect to its endpoint.
+ elif region:
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="Either region or ec2_url must be specified")
+
+ return ec2
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, bool):
+ filter_dict['Values'] = [str(v).lower()]
+ elif isinstance(v, integer_types):
+ filter_dict['Values'] = [str(v)]
+ elif isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {
+ 'MyTagKey': 'MyTagValue'
+ }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ # minio seems to return [{}] as an empty tags_list
+ if not tags_list or not any(tag for tag in tags_list):
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+ """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> ansible_dict_to_boto3_tag_list(tags_dict)
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ Returns:
+ List: List of dicts containing tag keys and values
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ """
+
+ tags_list = []
+ for k, v in tags_dict.items():
+ tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+ return tags_list
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap the call in
+ a try block.
+ """
+
+ def get_sg_name(sg, boto3):
+
+ if boto3:
+ return sg['GroupName']
+ else:
+ return sg.name
+
+ def get_sg_id(sg, boto3):
+
+ if boto3:
+ return sg['GroupId']
+ else:
+ return sg.id
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if boto3:
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+ else:
+ if vpc_id:
+ filters = {'vpc-id': vpc_id}
+ all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
+ else:
+ all_sec_groups = ec2_connection.get_all_security_groups()
+
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
+
+ return sec_group_id_list
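+
+# Per the docstring above, wrap the lookup in a try block (sketch; the group
+# names are illustrative):
+#
+#     try:
+#         sg_ids = get_ec2_security_group_ids_from_names(['default', 'sg-0123abcd'],
+#                                                        ec2_connection, vpc_id=vpc_id)
+#     except ValueError as e:
+#         module.fail_json(msg=str(e))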
+
+
+def _hashable_policy(policy, policy_list):
+ """
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', (u's3:PutObjectAcl',)),
+ ('Effect', (u'Allow',)),
+ ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
+ ('Version', (u'2012-10-17',)))]
+
+ """
+ # Amazon will automatically convert bool and int to strings for us
+ if isinstance(policy, bool):
+ return tuple([str(policy).lower()])
+ elif isinstance(policy, int):
+ return tuple([str(policy)])
+
+ if isinstance(policy, list):
+ for each in policy:
+ tupleified = _hashable_policy(each, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append(tupleified)
+ elif isinstance(policy, string_types) or isinstance(policy, binary_type):
+ policy = to_text(policy)
+ # convert root account ARNs to just account IDs
+ if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
+ policy = policy.split(':')[4]
+ return [policy]
+ elif isinstance(policy, dict):
+ sorted_keys = list(policy.keys())
+ sorted_keys.sort()
+ for key in sorted_keys:
+ element = policy[key]
+ # Special case defined in
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
+ if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
+ element = {"AWS": "*"}
+ tupleified = _hashable_policy(element, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append((key, tupleified))
+
+ # ensure we aren't returning deeply nested structures of length 1
+ if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
+ policy_list = policy_list[0]
+ if isinstance(policy_list, list):
+ if PY3_COMPARISON:
+ policy_list.sort(key=cmp_to_key(py3cmp))
+ else:
+ policy_list.sort()
+ return policy_list
+
+
+def py3cmp(a, b):
+ """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ try:
+ if a > b:
+ return 1
+ elif a < b:
+ return -1
+ else:
+ return 0
+ except TypeError as e:
+ # check to see if they're tuple-string
+ # always say strings are less than tuples (to maintain compatibility with python2)
+ str_ind = to_text(e).find('str')
+ tup_ind = to_text(e).find('tuple')
+ if -1 not in (str_ind, tup_ind):
+ if str_ind < tup_ind:
+ return -1
+ elif tup_ind < str_ind:
+ return 1
+ raise
+
+
+def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
+ """ Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
+ """
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
+ if default_version:
+ if isinstance(current_policy, dict):
+ current_policy = current_policy.copy()
+ current_policy.setdefault("Version", default_version)
+ if isinstance(new_policy, dict):
+ new_policy = new_policy.copy()
+ new_policy.setdefault("Version", default_version)
+
+ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
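+
+# Sketch: ordering differences and an implied Version are not treated as
+# drift, so this comparison returns False (no update needed):
+#
+#     current = {'Version': '2008-10-17',
+#                'Statement': [{'Effect': 'Allow', 'Principal': {'AWS': ['arn:x', 'arn:y']}}]}
+#     desired = {'Statement': [{'Principal': {'AWS': ['arn:y', 'arn:x']}, 'Effect': 'Allow'}]}
+#     compare_policies(current, desired)  # -> False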
+
+
+def sort_json_policy_dict(policy_dict):
+
+ """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ different orders will return true
+ Args:
+ policy_dict (dict): Dict representing IAM JSON policy.
+ Basic Usage:
+ >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
+ >>> sort_json_policy_dict(my_iam_policy)
+ Returns:
+ Dict: Will return a copy of the policy as a Dict, but any List will be sorted.
+ Note that string values sort lexicographically:
+ {
+ 'Principle': {
+ 'AWS': [ '101', '14', '31', '7' ]
+ }
+ }
+ """
+
+ def value_is_list(my_list):
+
+ checked_list = []
+ for item in my_list:
+ if isinstance(item, dict):
+ checked_list.append(sort_json_policy_dict(item))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+ # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+ checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+ return checked_list
+
+ ordered_policy_dict = {}
+ for key, value in policy_dict.items():
+ if isinstance(value, dict):
+ ordered_policy_dict[key] = sort_json_policy_dict(value)
+ elif isinstance(value, list):
+ ordered_policy_dict[key] = value_is_list(value)
+ else:
+ ordered_policy_dict[key] = value
+
+ return ordered_policy_dict
+
+
+def map_complex_type(complex_type, type_map):
+ """
+ Allows elements within a dictionary to be cast to a specific type.
+ Example of usage:
+
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ This ensures all keys within the root element are cast to valid integers.
+ """
+
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ if key in type_map:
+ if isinstance(type_map[key], list):
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key][0])
+ else:
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key])
+ else:
+ return complex_type
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(map_complex_type(
+ complex_type[i],
+ type_map))
+ elif type_map:
+ return globals()['__builtins__'][type_map](complex_type)
+ return new_type
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+ """
+ Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+ Two dicts are returned - the first contains tags to be set, the second any tags to remove. Since the tag APIs differ
+ between AWS services, these may not be usable out of the box.
+
+ :param current_tags_dict:
+ :param new_tags_dict:
+ :param purge_tags:
+ :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+ :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
+ """
+
+ tag_key_value_pairs_to_set = {}
+ tag_keys_to_unset = []
+
+ for key in current_tags_dict.keys():
+ if key not in new_tags_dict and purge_tags:
+ tag_keys_to_unset.append(key)
+
+ for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+ if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+ tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+ return tag_key_value_pairs_to_set, tag_keys_to_unset
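+
+# Example (hypothetical tags): with the default purge_tags=True, a changed value
+# is returned in the first dict and a removed key in the second list:
+# compare_aws_tags({'Env': 'dev', 'Owner': 'alice'}, {'Env': 'prod'})
+# returns ({'Env': 'prod'}, ['Owner'])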
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
new file mode 100644
index 00000000..218052d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .core import is_boto3_error_code
+from .ec2 import AWSRetry
+
+
+def get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
+ """
+ try:
+ return _get_elb(connection, module, elb_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name using AWSRetry. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
+ """
+
+ try:
+ load_balancer_paginator = connection.get_paginator('describe_load_balancers')
+ return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return None
+
+
+def get_elb_listener(connection, module, elb_arn, listener_port):
+ """
+ Get an ELB listener based on the port provided. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_arn: ARN of the ELB to look at
+ :param listener_port: Port of the listener to look for
+ :return: boto3 ELB listener dict or None if not found
+ """
+
+ try:
+ listener_paginator = connection.get_paginator('describe_listeners')
+ listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ for listener in listeners:
+ if listener['Port'] == listener_port:
+ return listener
+
+ return None
+
+
+def get_elb_listener_rules(connection, module, listener_arn):
+ """
+ Get rules for a particular ELB listener using the listener ARN.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param listener_arn: ARN of the ELB listener
+ :return: boto3 ELB rules list
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+ """
+ Get ARN of a target group using the target group's name
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param tg_name: Name of the target group
+ :return: target group ARN string
+ """
+
+ try:
+ response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+ return tg_arn
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
new file mode 100644
index 00000000..6078fba3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -0,0 +1,919 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from copy import deepcopy
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import get_ec2_security_group_ids_from_names
+from .elb_utils import convert_tg_name_to_arn
+from .elb_utils import get_elb
+from .elb_utils import get_elb_listener
+
+
+# ForwardConfig may be optional if we've got a single TargetGroupArn entry
+def _prune_ForwardConfig(action):
+ if "ForwardConfig" in action and action['Type'] == 'forward':
+ if action["ForwardConfig"] == {
+ 'TargetGroupStickinessConfig': {'Enabled': False},
+ 'TargetGroups': [{"TargetGroupArn": action["TargetGroupArn"], "Weight": 1}]}:
+ newAction = action.copy()
+ del(newAction["ForwardConfig"])
+ return newAction
+ return action
+
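+# Example (hypothetical ARN) of the pruning above:
+# _prune_ForwardConfig({'Type': 'forward', 'TargetGroupArn': 'arn:aws:...:tg/x',
+# 'ForwardConfig': {'TargetGroupStickinessConfig': {'Enabled': False},
+# 'TargetGroups': [{'TargetGroupArn': 'arn:aws:...:tg/x', 'Weight': 1}]}})
+# returns {'Type': 'forward', 'TargetGroupArn': 'arn:aws:...:tg/x'}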
+
+# the AWS api won't return the client secret, so we'll have to remove it
+# or the module will always see the new and current actions as different
+# and try to apply the same config
+def _prune_secret(action):
+ if action['Type'] == 'authenticate-oidc':
+ action['AuthenticateOidcConfig'].pop('ClientSecret', None)  # tolerate configs where no secret is present
+ return action
+
+
+def _sort_actions(actions):
+ return sorted(actions, key=lambda x: x.get('Order', 0))
+
+
+class ElasticLoadBalancerV2(object):
+
+ def __init__(self, connection, module):
+
+ self.connection = connection
+ self.module = module
+ self.changed = False
+ self.new_load_balancer = False
+ self.scheme = module.params.get("scheme")
+ self.name = module.params.get("name")
+ self.subnet_mappings = module.params.get("subnet_mappings")
+ self.subnets = module.params.get("subnets")
+ self.deletion_protection = module.params.get("deletion_protection")
+ self.wait = module.params.get("wait")
+
+ if module.params.get("tags") is not None:
+ self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
+ else:
+ self.tags = None
+ self.purge_tags = module.params.get("purge_tags")
+
+ self.elb = get_elb(connection, module, self.name)
+ if self.elb is not None:
+ self.elb_attributes = self.get_elb_attributes()
+ self.elb['tags'] = self.get_elb_tags()
+ else:
+ self.elb_attributes = None
+
+ def wait_for_status(self, elb_arn):
+ """
+ Wait for load balancer to reach 'active' status
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ try:
+ waiter = self.connection.get_waiter('load_balancer_available')
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def get_elb_attributes(self):
+ """
+ Get load balancer attributes
+
+ :return:
+ """
+
+ try:
+ attr_list = AWSRetry.jittered_backoff()(
+ self.connection.describe_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
+
+ elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
+
+ def update_elb_attributes(self):
+ """
+ Update the elb_attributes parameter
+ :return:
+ """
+ self.elb_attributes = self.get_elb_attributes()
+
+ def get_elb_tags(self):
+ """
+ Get load balancer tags
+
+ :return:
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(
+ self.connection.describe_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def delete_tags(self, tags_to_delete):
+ """
+ Delete elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.remove_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def modify_tags(self):
+ """
+ Modify elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.add_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def delete(self):
+ """
+ Delete elb
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.delete_load_balancer
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def compare_subnets(self):
+ """
+ Compare user subnets with current ELB subnets
+
+ :return: bool True if they match otherwise False
+ """
+
+ subnet_mapping_id_list = []
+ subnet_mappings = []
+
+ # Check if we're dealing with subnets or subnet_mappings
+ if self.subnets is not None:
+ # Convert subnets to subnet_mappings format for comparison
+ for subnet in self.subnets:
+ subnet_mappings.append({'SubnetId': subnet})
+
+ if self.subnet_mappings is not None:
+ # Use this directly since we're comparing as a mapping
+ subnet_mappings = self.subnet_mappings
+
+ # Build a subnet_mapping style structure of what's currently
+ # on the load balancer
+ for subnet in self.elb['AvailabilityZones']:
+ this_mapping = {'SubnetId': subnet['SubnetId']}
+ for address in subnet.get('LoadBalancerAddresses', []):
+ if 'AllocationId' in address:
+ this_mapping['AllocationId'] = address['AllocationId']
+ break
+
+ subnet_mapping_id_list.append(this_mapping)
+
+ return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
+
+ def modify_subnets(self):
+ """
+ Modify elb subnets to match module parameters
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_subnets
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def update(self):
+ """
+ Update the elb from AWS
+ :return:
+ """
+
+ self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
+ self.elb['tags'] = self.get_elb_tags()
+
+
+class ApplicationLoadBalancer(ElasticLoadBalancerV2):
+
+ def __init__(self, connection, connection_ec2, module):
+ """
+
+ :param connection: boto3 connection
+ :param module: Ansible module
+ """
+ super(ApplicationLoadBalancer, self).__init__(connection, module)
+
+ self.connection_ec2 = connection_ec2
+
+ # Ansible module parameters specific to ALBs
+ self.type = 'application'
+ if module.params.get('security_groups') is not None:
+ try:
+ self.security_groups = AWSRetry.jittered_backoff()(
+ get_ec2_security_group_ids_from_names
+ )(module.params.get('security_groups'), self.connection_ec2, boto3=True)
+ except ValueError as e:
+ self.module.fail_json(msg=str(e), exception=traceback.format_exc())
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+ else:
+ self.security_groups = module.params.get('security_groups')
+ self.access_logs_enabled = module.params.get("access_logs_enabled")
+ self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
+ self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
+ self.idle_timeout = module.params.get("idle_timeout")
+ self.http2 = module.params.get("http2")
+
+ if self.elb is not None and self.elb['Type'] != 'application':
+ self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")
+
+ def create_elb(self):
+ """
+ Create a load balancer
+ :return:
+ """
+
+ # Required parameters
+ params = dict()
+ params['Name'] = self.name
+ params['Type'] = self.type
+
+ # Other parameters
+ if self.subnets is not None:
+ params['Subnets'] = self.subnets
+ if self.subnet_mappings is not None:
+ params['SubnetMappings'] = self.subnet_mappings
+ if self.security_groups is not None:
+ params['SecurityGroups'] = self.security_groups
+ params['Scheme'] = self.scheme
+ if self.tags:
+ params['Tags'] = self.tags
+
+ try:
+ self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
+ self.changed = True
+ self.new_load_balancer = True
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ if self.wait:
+ self.wait_for_status(self.elb['LoadBalancerArn'])
+
+ def modify_elb_attributes(self):
+ """
+ Update Application ELB attributes if required
+
+ :return:
+ """
+
+ update_attributes = []
+
+ if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
+ update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
+ if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
+ update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
+ if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
+ update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
+ if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
+ update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+ if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
+ update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
+ if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
+ update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
+
+ if update_attributes:
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.modify_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ self.changed = True
+ except (BotoCoreError, ClientError) as e:
+ # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
+ if self.new_load_balancer:
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ self.module.fail_json_aws(e)
+
+ def compare_security_groups(self):
+ """
+ Compare user security groups with current ELB security groups
+
+ :return: bool True if they match otherwise False
+ """
+
+ return set(self.elb['SecurityGroups']) == set(self.security_groups)
+
+ def modify_security_groups(self):
+ """
+ Modify elb security groups to match module parameters
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_security_groups
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+
+class NetworkLoadBalancer(ElasticLoadBalancerV2):
+
+ def __init__(self, connection, connection_ec2, module):
+
+ """
+
+ :param connection: boto3 connection
+ :param module: Ansible module
+ """
+ super(NetworkLoadBalancer, self).__init__(connection, module)
+
+ self.connection_ec2 = connection_ec2
+
+ # Ansible module parameters specific to NLBs
+ self.type = 'network'
+ self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')
+
+ if self.elb is not None and self.elb['Type'] != 'network':
+ self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")
+
+ def create_elb(self):
+ """
+ Create a load balancer
+ :return:
+ """
+
+ # Required parameters
+ params = dict()
+ params['Name'] = self.name
+ params['Type'] = self.type
+
+ # Other parameters
+ if self.subnets is not None:
+ params['Subnets'] = self.subnets
+ if self.subnet_mappings is not None:
+ params['SubnetMappings'] = self.subnet_mappings
+ params['Scheme'] = self.scheme
+ if self.tags:
+ params['Tags'] = self.tags
+
+ try:
+ self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
+ self.changed = True
+ self.new_load_balancer = True
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ if self.wait:
+ self.wait_for_status(self.elb['LoadBalancerArn'])
+
+ def modify_elb_attributes(self):
+ """
+ Update Network ELB attributes if required
+
+ :return:
+ """
+
+ update_attributes = []
+
+ if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
+ self.elb_attributes['load_balancing_cross_zone_enabled']:
+ update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()})
+ if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
+ update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+
+ if update_attributes:
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.modify_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ self.changed = True
+ except (BotoCoreError, ClientError) as e:
+ # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
+ if self.new_load_balancer:
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ self.module.fail_json_aws(e)
+
+ def modify_subnets(self):
+ """
+ Modify elb subnets to match module parameters (unsupported for NLB)
+ :return:
+ """
+
+ self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
+
+
+class ELBListeners(object):
+
+ def __init__(self, connection, module, elb_arn):
+
+ self.connection = connection
+ self.module = module
+ self.elb_arn = elb_arn
+ listeners = module.params.get("listeners")
+ if listeners is not None:
+ # Remove suboption argspec defaults of None from each listener
+ listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
+ self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
+ self.current_listeners = self._get_elb_listeners()
+ self.purge_listeners = module.params.get("purge_listeners")
+ self.changed = False
+
+ def update(self):
+ """
+ Update the listeners for the ELB
+
+ :return:
+ """
+ self.current_listeners = self._get_elb_listeners()
+
+ def _get_elb_listeners(self):
+ """
+ Get ELB listeners
+
+ :return:
+ """
+
+ try:
+ listener_paginator = self.connection.get_paginator('describe_listeners')
+ return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def _ensure_listeners_default_action_has_arn(self, listeners):
+ """
+ If a listener DefaultAction has been passed with a Target Group Name instead of an ARN, look up the ARN and
+ replace the name.
+
+ :param listeners: a list of listener dicts
+ :return: the same list of dicts, ensuring that each listener DefaultActions dict has a TargetGroupArn key. If a TargetGroupName key exists, it is removed.
+ """
+
+ if not listeners:
+ listeners = []
+
+ fixed_listeners = []
+ for listener in listeners:
+ fixed_actions = []
+ for action in listener['DefaultActions']:
+ if 'TargetGroupName' in action:
+ action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
+ self.module,
+ action['TargetGroupName'])
+ del action['TargetGroupName']
+ fixed_actions.append(action)
+ listener['DefaultActions'] = fixed_actions
+ fixed_listeners.append(listener)
+
+ return fixed_listeners
+
+ def compare_listeners(self):
+ """
+
+ :return:
+ """
+ listeners_to_modify = []
+ listeners_to_delete = []
+ listeners_to_add = deepcopy(self.listeners)
+
+ # Check each current listener port to see if it's been passed to the module
+ for current_listener in self.current_listeners:
+ current_listener_passed_to_module = False
+ for new_listener in self.listeners[:]:
+ new_listener['Port'] = int(new_listener['Port'])
+ if current_listener['Port'] == new_listener['Port']:
+ current_listener_passed_to_module = True
+ # Remove what we match so that what is left can be marked as 'to be added'
+ listeners_to_add.remove(new_listener)
+ modified_listener = self._compare_listener(current_listener, new_listener)
+ if modified_listener:
+ modified_listener['Port'] = current_listener['Port']
+ modified_listener['ListenerArn'] = current_listener['ListenerArn']
+ listeners_to_modify.append(modified_listener)
+ break
+
+ # If the current listener was not matched against passed listeners and purge is True, mark for removal
+ if not current_listener_passed_to_module and self.purge_listeners:
+ listeners_to_delete.append(current_listener['ListenerArn'])
+
+ return listeners_to_add, listeners_to_modify, listeners_to_delete
+
+ def _compare_listener(self, current_listener, new_listener):
+ """
+ Compare two listeners.
+
+ :param current_listener:
+ :param new_listener:
+ :return:
+ """
+
+ modified_listener = {}
+
+ # Port
+ if current_listener['Port'] != new_listener['Port']:
+ modified_listener['Port'] = new_listener['Port']
+
+ # Protocol
+ if current_listener['Protocol'] != new_listener['Protocol']:
+ modified_listener['Protocol'] = new_listener['Protocol']
+
+ # If Protocol is HTTPS, check additional attributes
+ if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
+ # Cert
+ if current_listener['SslPolicy'] != new_listener['SslPolicy']:
+ modified_listener['SslPolicy'] = new_listener['SslPolicy']
+ if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
+ modified_listener['Certificates'] = []
+ modified_listener['Certificates'].append({})
+ modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
+ elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
+ modified_listener['SslPolicy'] = new_listener['SslPolicy']
+ modified_listener['Certificates'] = []
+ modified_listener['Certificates'].append({})
+ modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
+
+ # Default action
+
+ # Check proper rule format on current listener
+ if len(current_listener['DefaultActions']) > 1:
+ for action in current_listener['DefaultActions']:
+ if 'Order' not in action:
+ self.module.fail_json(msg="'Order' key not found in actions. "
+ "installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+
+ # If the lengths of the actions are the same, we'll have to verify that the
+ # contents of those actions are the same
+ if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
+ current_actions_sorted = _sort_actions(current_listener['DefaultActions'])
+ new_actions_sorted = _sort_actions(new_listener['DefaultActions'])
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
+ modified_listener['DefaultActions'] = new_listener['DefaultActions']
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_listener['DefaultActions'] = new_listener['DefaultActions']
+
+ if modified_listener:
+ return modified_listener
+ else:
+ return None
+
+
+class ELBListener(object):
+
+ def __init__(self, connection, module, listener, elb_arn):
+ """
+
+ :param connection:
+ :param module:
+ :param listener:
+ :param elb_arn:
+ """
+
+ self.connection = connection
+ self.module = module
+ self.listener = listener
+ self.elb_arn = elb_arn
+
+ def add(self):
+
+ try:
+ # Rules is not a valid parameter for create_listener
+ if 'Rules' in self.listener:
+ self.listener.pop('Rules')
+ AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ def modify(self):
+
+ try:
+ # Rules is not a valid parameter for modify_listener
+ if 'Rules' in self.listener:
+ self.listener.pop('Rules')
+ AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ def delete(self):
+
+ try:
+ AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+
+class ELBListenerRules(object):
+
+ def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
+
+ self.connection = connection
+ self.module = module
+ self.elb_arn = elb_arn
+ self.rules = self._ensure_rules_action_has_arn(listener_rules)
+ self.changed = False
+
+ # Get listener based on port so we can use ARN
+ self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
+ self.listener_arn = self.current_listener['ListenerArn']
+ self.rules_to_add = deepcopy(self.rules)
+ self.rules_to_modify = []
+ self.rules_to_delete = []
+
+ # If the listener exists (i.e. has an ARN) get rules for the listener
+ if 'ListenerArn' in self.current_listener:
+ self.current_rules = self._get_elb_listener_rules()
+ else:
+ self.current_rules = []
+
+ def _ensure_rules_action_has_arn(self, rules):
+ """
+ If a rule Action has been passed with a Target Group Name instead of an ARN, look up the ARN and
+ replace the name.
+
+ :param rules: a list of rule dicts
+ :return: the same list of dicts, ensuring that each rule Actions dict has a TargetGroupArn key. If a TargetGroupName key exists, it is removed.
+ """
+
+ fixed_rules = []
+ for rule in rules:
+ fixed_actions = []
+ for action in rule['Actions']:
+ if 'TargetGroupName' in action:
+ action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
+ del action['TargetGroupName']
+ fixed_actions.append(action)
+ rule['Actions'] = fixed_actions
+ fixed_rules.append(rule)
+
+ return fixed_rules
+
+ def _get_elb_listener_rules(self):
+
+ try:
+ return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def _compare_condition(self, current_conditions, condition):
+ """
+
+ :param current_conditions:
+ :param condition:
+ :return:
+ """
+
+ condition_found = False
+
+ for current_condition in current_conditions:
+ # host-header: current_condition includes both HostHeaderConfig AND Values while
+ # condition can be defined with either HostHeaderConfig OR Values. Only use
+ # HostHeaderConfig['Values'] comparison if both conditions include HostHeaderConfig.
+ if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])):
+ condition_found = True
+ break
+ elif current_condition.get('HttpHeaderConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and
+ current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']):
+ condition_found = True
+ break
+ elif current_condition.get('HttpRequestMethodConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])):
+ condition_found = True
+ break
+ # path-pattern: current_condition includes both PathPatternConfig AND Values while
+ # condition can be defined with either PathPatternConfig OR Values. Only use
+ # PathPatternConfig['Values'] comparison if both conditions include PathPatternConfig.
+ elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])):
+ condition_found = True
+ break
+ elif current_condition.get('QueryStringConfig'):
+ # QueryString Values is not sorted as it is the only list of dicts (not strings).
+ if (current_condition['Field'] == condition['Field'] and
+ current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']):
+ condition_found = True
+ break
+ elif current_condition.get('SourceIpConfig'):
+ if (current_condition['Field'] == condition['Field'] and
+ sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])):
+ condition_found = True
+ break
+ # Not all fields are required to have Values list nested within a *Config dict
+ # e.g. fields host-header/path-pattern can directly list Values
+ elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
+ condition_found = True
+ break
+
+ return condition_found
+
+ def _compare_rule(self, current_rule, new_rule):
+ """
+
+ :return:
+ """
+
+ modified_rule = {}
+
+ # Priority
+ if int(current_rule['Priority']) != int(new_rule['Priority']):
+ modified_rule['Priority'] = new_rule['Priority']
+
+ # Actions
+
+ # Check proper rule format on current listener
+ if len(current_rule['Actions']) > 1:
+ for action in current_rule['Actions']:
+ if 'Order' not in action:
+ self.module.fail_json(msg="'Order' key not found in actions. "
+ "installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+
+ # If the lengths of the actions are the same, we'll have to verify that the
+ # contents of those actions are the same
+ if len(current_rule['Actions']) == len(new_rule['Actions']):
+ # sort the actions so that ordering differences alone don't
+ # register as a change
+ current_actions_sorted = _sort_actions(current_rule['Actions'])
+ new_actions_sorted = _sort_actions(new_rule['Actions'])
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
+ modified_rule['Actions'] = new_rule['Actions']
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_rule['Actions'] = new_rule['Actions']
+
+ # Conditions
+ modified_conditions = []
+ for condition in new_rule['Conditions']:
+ if not self._compare_condition(current_rule['Conditions'], condition):
+ modified_conditions.append(condition)
+
+ if modified_conditions:
+ modified_rule['Conditions'] = modified_conditions
+
+ return modified_rule
+
+ def compare_rules(self):
+ """
+
+ :return:
+ """
+
+ rules_to_modify = []
+ rules_to_delete = []
+ rules_to_add = deepcopy(self.rules)
+
+ for current_rule in self.current_rules:
+ current_rule_passed_to_module = False
+ for new_rule in self.rules[:]:
+ if current_rule['Priority'] == str(new_rule['Priority']):
+ current_rule_passed_to_module = True
+ # Remove what we match so that what is left can be marked as 'to be added'
+ rules_to_add.remove(new_rule)
+ modified_rule = self._compare_rule(current_rule, new_rule)
+ if modified_rule:
+ modified_rule['Priority'] = int(current_rule['Priority'])
+ modified_rule['RuleArn'] = current_rule['RuleArn']
+ modified_rule['Actions'] = new_rule['Actions']
+ modified_rule['Conditions'] = new_rule['Conditions']
+ rules_to_modify.append(modified_rule)
+ break
+
+ # If the current rule was not matched against passed rules, mark for removal
+ if not current_rule_passed_to_module and not current_rule['IsDefault']:
+ rules_to_delete.append(current_rule['RuleArn'])
+
+ return rules_to_add, rules_to_modify, rules_to_delete
+
+
+class ELBListenerRule(object):
+
+ def __init__(self, connection, module, rule, listener_arn):
+
+ self.connection = connection
+ self.module = module
+ self.rule = rule
+ self.listener_arn = listener_arn
+ self.changed = False
+
+ def create(self):
+ """
+ Create a listener rule
+
+ :return:
+ """
+
+ try:
+ self.rule['ListenerArn'] = self.listener_arn
+ self.rule['Priority'] = int(self.rule['Priority'])
+ AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def modify(self):
+ """
+ Modify a listener rule
+
+ :return:
+ """
+
+ try:
+ del self.rule['Priority']
+ AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
+ except (BotoCoreError, ClientError) as e:
+ if '"Order", must be one of: Type, TargetGroupArn' in str(e):
+ self.module.fail_json(msg="installed version of botocore does not support "
+ "multiple actions, please upgrade botocore to version "
+ "1.10.30 or higher")
+ else:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def delete(self):
+ """
+ Delete a listener rule
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/iam.py
new file mode 100644
index 00000000..7e6aba78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+
+from .ec2 import AWSRetry
+from .core import is_boto3_error_code
+
+
+def get_aws_account_id(module):
+ """ Given an AnsibleAWSModule instance, get the active AWS account ID
+ """
+
+ return get_aws_account_info(module)[0]
+
+
+def get_aws_account_info(module):
+ """Given an AnsibleAWSModule instance, return the account information
+ (account id and partition) we are currently working on
+
+ get_account_info tries to find out the account that we are working
+ on. It's not guaranteed that this will be easy so we try in
+ several different ways. Giving either IAM or STS privileges to
+ the account should be enough to permit this.
+
+ Tries:
+ - sts:GetCallerIdentity
+ - iam:GetUser
+ - sts:DecodeAuthorizationMessage
+ """
+ account_id = None
+ partition = None
+ try:
+ sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+ caller_id = sts_client.get_caller_identity(aws_retry=True)
+ account_id = caller_id.get('Account')
+ partition = caller_id.get('Arn').split(':')[1]
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
+ try:
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
+ except is_boto3_error_code('AccessDenied') as e:
+ try:
+ except_msg = to_native(e.message)
+ except AttributeError:
+ except_msg = to_native(e)
+ m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
+ if m is None:
+ module.fail_json_aws(
+ e,
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+ account_id = m.group(4)
+ partition = m.group(1)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+
+ if account_id is None or partition is None:
+ module.fail_json(
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ )
+
+ return (to_native(account_id), to_native(partition))
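+
+# Example (hypothetical account): get_aws_account_info(module) returns a tuple
+# such as ('123456789012', 'aws'), or ('123456789012', 'aws-us-gov') on GovCloud.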
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/rds.py
new file mode 100644
index 00000000..221b92ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -0,0 +1,235 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import namedtuple
+from time import sleep
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import compare_aws_tags
+from .waiters import get_waiter
+
+Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
+# Whitelist boto3 client methods for cluster and instance resources
+cluster_method_names = [
+ 'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
+ 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
+ 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
+]
+instance_method_names = [
+ 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
+ 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
+ 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
+ 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
+]
+
+
+def get_rds_method_attribute(method_name, module):
+ readable_op = method_name.replace('_', ' ').replace('db', 'DB')
+ if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
+ cluster = True
+ instance = False
+ if method_name == 'delete_db_cluster':
+ waiter = 'cluster_deleted'
+ else:
+ waiter = 'cluster_available'
+ elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
+ cluster = False
+ instance = True
+ if method_name == 'delete_db_instance':
+ waiter = 'db_instance_deleted'
+ elif method_name == 'stop_db_instance':
+ waiter = 'db_instance_stopped'
+ else:
+ waiter = 'db_instance_available'
+ else:
+ raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))
+
+ return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, cluster=cluster, instance=instance)
+
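+# Example (hypothetical module): for 'delete_db_instance' on a module whose params
+# include 'new_db_instance_identifier', this returns a Boto3ClientMethod with
+# waiter='db_instance_deleted' and operation_description='delete DB instance'.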
+
+def get_final_identifier(method_name, module):
+ apply_immediately = module.params['apply_immediately']
+ if get_rds_method_attribute(method_name, module).cluster:
+ identifier = module.params['db_cluster_identifier']
+ updated_identifier = module.params['new_db_cluster_identifier']
+ elif get_rds_method_attribute(method_name, module).instance:
+ identifier = module.params['db_instance_identifier']
+ updated_identifier = module.params['new_db_instance_identifier']
+ else:
+ raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))
+ if not module.check_mode and updated_identifier and apply_immediately:
+ identifier = updated_identifier
+ return identifier
+
+
+def handle_errors(module, exception, method_name, parameters):
+
+ if not isinstance(exception, ClientError):
+ module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
+
+ changed = True
+ error_code = exception.response['Error']['Code']
+ if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
+ if 'No modifications were requested' in to_text(exception):
+ changed = False
+ elif 'ModifyDbCluster API' in to_text(exception):
+ module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
+ if 'DB Instance is not a read replica' in to_text(exception):
+ changed = False
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ elif method_name == 'create_db_instance' and error_code == 'InvalidParameterValue':
+ accepted_engines = [
+ 'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
+ 'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
+ ]
+ if parameters.get('Engine') not in accepted_engines:
+ module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ else:
+ module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+
+ return changed
+
+
+def call_method(client, module, method_name, parameters):
+ result = {}
+ changed = True
+ if not module.check_mode:
+ wait = module.params['wait']
+ # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
+ method = getattr(client, method_name)
+ try:
+ if method_name == 'modify_db_instance':
+ # check if instance is in an available state first, if possible
+ if wait:
+ wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
+ result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
+ else:
+ result = AWSRetry.jittered_backoff()(method)(**parameters)
+ except (BotoCoreError, ClientError) as e:
+ changed = handle_errors(module, e, method_name, parameters)
+
+ if wait and changed:
+ identifier = get_final_identifier(method_name, module)
+ wait_for_status(client, module, identifier, method_name)
+ return result, changed
+
+
+def wait_for_instance_status(client, module, db_instance_id, waiter_name):
+ def wait(client, db_instance_id, waiter_name, extra_retry_codes):
+ retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
+ try:
+ waiter = client.get_waiter(waiter_name)
+ except ValueError:
+ # using a waiter in module_utils/waiters.py
+ waiter = get_waiter(client, waiter_name)
+ waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
+
+ waiter_expected_status = {
+ 'db_instance_deleted': 'deleted',
+ 'db_instance_stopped': 'stopped',
+ }
+ expected_status = waiter_expected_status.get(waiter_name, 'available')
+ if expected_status == 'available':
+ extra_retry_codes = ['DBInstanceNotFound']
+ else:
+ extra_retry_codes = []
+ for attempt_to_wait in range(0, 10):
+ try:
+ wait(client, db_instance_id, waiter_name, extra_retry_codes)
+ break
+ except WaiterError as e:
+ # Instance may be renamed and AWSRetry doesn't handle WaiterError
+ if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
+ sleep(10)
+ continue
+ module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
+ db_instance_id, expected_status)
+ )
+
+
+def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
+ try:
+ waiter = get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
+ except WaiterError as e:
+ if waiter_name == 'cluster_deleted':
+ msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
+ else:
+ msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
+ module.fail_json_aws(e, msg=msg)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
+
+
+def wait_for_status(client, module, identifier, method_name):
+ waiter_name = get_rds_method_attribute(method_name, module).waiter
+ if get_rds_method_attribute(method_name, module).cluster:
+ wait_for_cluster_status(client, module, identifier, waiter_name)
+ elif get_rds_method_attribute(method_name, module).instance:
+ wait_for_instance_status(client, module, identifier, waiter_name)
+ else:
+ raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
+
+
+def get_tags(client, module, cluster_arn):
+ try:
+ return boto3_tag_list_to_ansible_dict(
+ client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe tags")
+
+
+def arg_spec_to_rds_params(options_dict):
+ tags = options_dict.pop('tags')
+ has_processor_features = False
+ if 'processor_features' in options_dict:
+ has_processor_features = True
+ processor_features = options_dict.pop('processor_features')
+ camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
+ for key in list(camel_options.keys()):
+ for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
+ if old in key:
+ camel_options[key.replace(old, new)] = camel_options.pop(key)
+ camel_options['Tags'] = tags
+ if has_processor_features:
+ camel_options['ProcessorFeatures'] = processor_features
+ return camel_options
+
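+# Example (hypothetical options): snake_case params become boto3 CamelCase with
+# the DB/IAM/AZ initialisms restored:
+# arg_spec_to_rds_params({'db_instance_identifier': 'db1', 'apply_immediately': True, 'tags': None})
+# returns {'DBInstanceIdentifier': 'db1', 'ApplyImmediately': True, 'Tags': None}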
+
+def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
+ if tags is None:
+ return False
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
+ changed = bool(tags_to_add or tags_to_remove)
+ if tags_to_add:
+ call_method(
+ client, module, method_name='add_tags_to_resource',
+ parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
+ )
+ if tags_to_remove:
+ call_method(
+ client, module, method_name='remove_tags_from_resource',
+ parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
+ )
+ return changed
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/s3.py
new file mode 100644
index 00000000..32e0e822
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
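+ # e.g. a (hypothetical) ETag "9bb58f26192e4ba00f01e2e7b136bbd8-5" has 5 parts;
+ # we hash the local file in the same part sizes and combine the digests.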
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ with open(filename, 'rb') as f:
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ digests.append(md5(f.read(int(head['ContentLength']))))
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(module.md5(filename))
+
+
+def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+ offset = 0
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ length = int(head['ContentLength'])
+ digests.append(md5(content[offset:offset + length]))
+ offset += length
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(md5(content).hexdigest())
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/urls.py
new file mode 100644
index 00000000..e00f485c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/urls.py
@@ -0,0 +1,212 @@
+# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import hashlib
+import hmac
+import operator
+
+try:
+ from boto3 import session
+except ImportError:
+ pass
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+from .ec2 import HAS_BOTO3
+from .ec2 import get_aws_connection_info
+
+
+def hexdigest(s):
+ """
+ Returns the sha256 hexdigest of a string after encoding.
+ """
+
+ return hashlib.sha256(s.encode("utf-8")).hexdigest()
+
+
+def format_querystring(params=None):
+ """
+ Returns a properly url-encoded query string from the provided params dict.
+
+ The parameters are sorted by name, as required for canonical requests.
+ """
+
+ if not params:
+ return ""
+
+ # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
+ return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
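+
+# Example (hypothetical params): format_querystring({'b': '2', 'a': '1'}) returns 'a=1&b=2'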
+
+
+# Key derivation functions. See:
+# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
+def sign(key, msg):
+ '''
+ Return digest for key applied to msg
+ '''
+
+ return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+
+def get_signature_key(key, dateStamp, regionName, serviceName):
+ '''
+ Returns signature key for AWS resource
+ '''
+
+ kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
+ kRegion = sign(kDate, regionName)
+ kService = sign(kRegion, serviceName)
+ kSigning = sign(kService, "aws4_request")
+ return kSigning
+
+
+def get_aws_credentials_object(module):
+ '''
+ Returns a botocore Credentials object (access key, secret key and session token) for a module.
+ '''
+
+ if not HAS_BOTO3:
+ module.fail_json("get_aws_credentials_object requires boto3")
+
+ dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
+ s = session.Session(**boto_params)
+
+ return s.get_credentials()
+
+
+# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
+def signed_request(
+ module=None,
+ method="GET", service=None, host=None, uri=None,
+ query=None, body="", headers=None,
+ session_in_header=True, session_in_query=False
+):
+ """Generate a SigV4 request to an AWS resource for a module
+
+ This is used if you wish to authenticate with AWS credentials to a secure endpoint like an Elasticsearch domain.
+
+ Returns :class:`HTTPResponse` object.
+
+ Example:
+ result = signed_request(
+ module=module,
+ service="es",
+ host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
+ )
+
+ :kwarg host: endpoint to talk to
+ :kwarg service: AWS id of service (like `ec2` or `es`)
+ :kwarg module: An AnsibleAWSModule to gather connection info from
+
+ :kwarg body: (optional) Payload to send
+ :kwarg method: (optional) HTTP verb to use
+ :kwarg query: (optional) dict of query params to handle
+ :kwarg uri: (optional) Resource path without query parameters
+
+ :kwarg session_in_header: (optional) Add the session token to the headers
+ :kwarg session_in_query: (optional) Add the session token to the query parameters
+
+ :returns: HTTPResponse
+ """
+
+ if not HAS_BOTO3:
+ module.fail_json("A sigv4 signed_request requires boto3")
+
+ # "Constants"
+
+ t = datetime.datetime.utcnow()
+ amz_date = t.strftime("%Y%m%dT%H%M%SZ")
+ datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
+ algorithm = "AWS4-HMAC-SHA256"
+
+ # AWS stuff
+
+ region, dummy, dummy = get_aws_connection_info(module, boto3=True)
+ credentials = get_aws_credentials_object(module)
+ access_key = credentials.access_key
+ secret_key = credentials.secret_key
+ session_token = credentials.token
+
+ if not access_key:
+ module.fail_json(msg="aws_access_key_id is missing")
+ if not secret_key:
+ module.fail_json(msg="aws_secret_access_key is missing")
+
+ credential_scope = "/".join([datestamp, region, service, "aws4_request"])
+
+ # Argument Defaults
+
+ uri = uri or "/"
+ query_string = format_querystring(query) if query else ""
+
+ headers = headers or dict()
+ query = query or dict()
+
+ headers.update({
+ "host": host,
+ "x-amz-date": amz_date,
+ })
+
+ # Handle adding of session_token if present
+ if session_token:
+ if session_in_header:
+ headers["X-Amz-Security-Token"] = session_token
+ if session_in_query:
+ query["X-Amz-Security-Token"] = session_token
+
+ if method == "GET":
+ body = ""
+
+ # Derived data
+
+    body_hash = hexdigest(body)
+    # SigV4 requires header names to be lowercased and sorted consistently in
+    # both the signed headers list and the canonical headers block.
+    signed_headers = ";".join(sorted(key.lower() for key in headers))
+
+    # Set up the canonical request used to generate the auth token
+
+    canonical_headers = "\n".join([
+        key.lower().strip() + ":" + value
+        for key, value in sorted(headers.items(), key=lambda item: item[0].lower())
+    ]) + "\n"  # Note additional trailing newline
+
+    canonical_request = "\n".join([
+        method,
+        uri,
+        query_string,
+        canonical_headers,
+        signed_headers,
+        body_hash,
+    ])
+
+    string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(canonical_request)])
+
+    # Sign the canonical request
+
+    signing_key = get_signature_key(secret_key, datestamp, region, service)
+    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
+
+ # Make auth header with that info
+
+ authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
+ algorithm, access_key, credential_scope, signed_headers, signature
+ )
+
+ # PERFORM THE REQUEST!
+
+ url = "https://" + host + uri
+
+ if query_string != "":
+ url = url + "?" + query_string
+
+ final_headers = {
+ "x-amz-date": amz_date,
+ "Authorization": authorization_header,
+ }
+
+ final_headers.update(headers)
+
+ return open_url(url, method=method, data=body, headers=final_headers)
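+
+# A minimal usage sketch, assuming an AnsibleAWSModule instance named
+# ``module`` and an illustrative Elasticsearch domain endpoint:
+#
+#     response = signed_request(
+#         module=module,
+#         service="es",
+#         host="search-example-xxxxxxxx.us-east-1.es.amazonaws.com",
+#         uri="/_cluster/health",
+#     )
+#     body = response.read()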
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waf.py
new file mode 100644
index 00000000..3ecc645c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waf.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2017 Will Thames
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Web Application Firewall modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .ec2 import AWSRetry
+from .waiters import get_waiter
+
+
+MATCH_LOOKUP = {
+ 'byte': {
+ 'method': 'byte_match_set',
+ 'conditionset': 'ByteMatchSet',
+ 'conditiontuple': 'ByteMatchTuple',
+ 'type': 'ByteMatch'
+ },
+ 'geo': {
+ 'method': 'geo_match_set',
+ 'conditionset': 'GeoMatchSet',
+ 'conditiontuple': 'GeoMatchConstraint',
+ 'type': 'GeoMatch'
+ },
+ 'ip': {
+ 'method': 'ip_set',
+ 'conditionset': 'IPSet',
+ 'conditiontuple': 'IPSetDescriptor',
+ 'type': 'IPMatch'
+ },
+ 'regex': {
+ 'method': 'regex_match_set',
+ 'conditionset': 'RegexMatchSet',
+ 'conditiontuple': 'RegexMatchTuple',
+ 'type': 'RegexMatch'
+ },
+ 'size': {
+ 'method': 'size_constraint_set',
+ 'conditionset': 'SizeConstraintSet',
+ 'conditiontuple': 'SizeConstraint',
+ 'type': 'SizeConstraint'
+ },
+ 'sql': {
+ 'method': 'sql_injection_match_set',
+ 'conditionset': 'SqlInjectionMatchSet',
+ 'conditiontuple': 'SqlInjectionMatchTuple',
+ 'type': 'SqlInjectionMatch',
+ },
+ 'xss': {
+ 'method': 'xss_match_set',
+ 'conditionset': 'XssMatchSet',
+ 'conditiontuple': 'XssMatchTuple',
+ 'type': 'XssMatch'
+ },
+}
+
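+# A sketch of how a consumer module might drive boto3 calls from this table
+# (``client`` is a hypothetical WAF client):
+#
+#     func = getattr(client, 'get_' + MATCH_LOOKUP['ip']['method'])  # client.get_ip_set
+#     id_key = MATCH_LOOKUP['ip']['conditionset'] + 'Id'             # 'IPSetId'
+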
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_rule_with_backoff(client, rule_id):
+ return client.get_rule(RuleId=rule_id)['Rule']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_byte_match_set_with_backoff(client, byte_match_set_id):
+ return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_ip_set_with_backoff(client, ip_set_id):
+ return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
+ return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
+ return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_xss_match_set_with_backoff(client, xss_match_set_id):
+ return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
+
+
+def get_rule(client, module, rule_id):
+ try:
+ rule = get_rule_with_backoff(client, rule_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain waf rule")
+
+ match_sets = {
+ 'ByteMatch': get_byte_match_set_with_backoff,
+ 'IPMatch': get_ip_set_with_backoff,
+ 'SizeConstraint': get_size_constraint_set_with_backoff,
+ 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
+ 'XssMatch': get_xss_match_set_with_backoff
+ }
+ if 'Predicates' in rule:
+ for predicate in rule['Predicates']:
+ if predicate['Type'] in match_sets:
+ predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
+ # replaced by Id from the relevant MatchSet
+                del predicate['DataId']
+ return rule
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_web_acl_with_backoff(client, web_acl_id):
+ return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+
+
+def get_web_acl(client, module, web_acl_id):
+ try:
+ web_acl = get_web_acl_with_backoff(client, web_acl_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acl")
+
+ if web_acl:
+ try:
+ for rule in web_acl['Rules']:
+ rule.update(get_rule(client, module, rule['RuleId']))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
+ return camel_dict_to_snake_dict(web_acl)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_rules_with_backoff(client):
+ paginator = client.get_paginator('list_rules')
+ return paginator.paginate().build_full_result()['Rules']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_regional_rules_with_backoff(client):
+ resp = client.list_rules()
+ rules = []
+ while resp:
+ rules += resp['Rules']
+ resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ return rules
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_web_acls_with_backoff(client):
+ paginator = client.get_paginator('list_web_acls')
+ return paginator.paginate().build_full_result()['WebACLs']
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def list_regional_web_acls_with_backoff(client):
+ resp = client.list_web_acls()
+ acls = []
+ while resp:
+ acls += resp['WebACLs']
+ resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ return acls
+
+
+def list_web_acls(client, module):
+ try:
+ if client.__class__.__name__ == 'WAF':
+ return list_web_acls_with_backoff(client)
+ elif client.__class__.__name__ == 'WAFRegional':
+ return list_regional_web_acls_with_backoff(client)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain web acls")
+
+
+def get_change_token(client, module):
+ try:
+ token = client.get_change_token()
+ return token['ChangeToken']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain change token")
+
+
+@AWSRetry.backoff(tries=10, delay=2, backoff=2.0, catch_extra_error_codes=['WAFStaleDataException'])
+def run_func_with_change_token_backoff(client, module, params, func, wait=False):
+ params['ChangeToken'] = get_change_token(client, module)
+ result = func(**params)
+ if wait:
+ get_waiter(
+ client, 'change_token_in_sync',
+ ).wait(
+ ChangeToken=result['ChangeToken']
+ )
+ return result
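+
+# A minimal usage sketch (hypothetical client, module and name): create an
+# IPSet with automatic change-token handling:
+#
+#     result = run_func_with_change_token_backoff(
+#         client, module, {'Name': 'my-ip-set'}, client.create_ip_set, wait=True)
+#
+# The helper injects a fresh ChangeToken into the params, retries on
+# WAFStaleDataException, and can wait for the token to reach INSYNC.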
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
new file mode 100644
index 00000000..ff1aac88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
@@ -0,0 +1,551 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+import ansible_collections.amazon.aws.plugins.module_utils.core as aws_core
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "ImageAvailable": {
+ "operation": "DescribeImages",
+ "maxAttempts": 80,
+ "delay": 15,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "Images[].State",
+ "expected": "available"
+ },
+ {
+ "state": "failure",
+ "matcher": "pathAny",
+ "argument": "Images[].State",
+ "expected": "failed"
+ }
+ ]
+ },
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceAttached": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "attached",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceAvailable": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": True,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceNoDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": False,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ }
+}
+
+
+waf_data = {
+ "version": 2,
+ "waiters": {
+ "ChangeTokenInSync": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "GetChangeTokenStatus",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "ChangeTokenStatus == 'INSYNC'",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "WAFInternalErrorException",
+ "state": "retry"
+ }
+ ]
+ }
+ }
+}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+rds_data = {
+ "version": 2,
+ "waiters": {
+ "DBInstanceStopped": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].DBInstanceStatus",
+ "expected": "stopped"
+ },
+ ]
+ }
+ }
+}
+
+
+def _inject_limit_retries(model):
+
+ extra_retries = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling']
+
+ acceptors = []
+ for error in extra_retries:
+ acceptors.append({"state": "success", "matcher": "error", "expected": error})
+
+ _model = copy.deepcopy(model)
+
+ for waiter in model["waiters"]:
+ _model["waiters"][waiter]["acceptors"].extend(acceptors)
+
+ return _model
+
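+# Each waiter in the model gains one extra acceptor per error code above, of
+# the form {"state": "success", "matcher": "error", "expected": "Throttling"},
+# so hitting an API rate limit ends the wait cleanly instead of raising.
+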
+
+def ec2_model(name):
+ ec2_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(ec2_data))
+ return ec2_models.get_waiter(name)
+
+
+def waf_model(name):
+ waf_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(waf_data))
+ return waf_models.get_waiter(name)
+
+
+def eks_model(name):
+ eks_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(eks_data))
+ return eks_models.get_waiter(name)
+
+
+def rds_model(name):
+ rds_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(rds_data))
+ return rds_models.get_waiter(name)
+
+
+waiters_by_name = {
+ ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter(
+ 'image_available',
+ ec2_model('ImageAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_images
+ )),
+ ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_exists',
+ ec2_model('InternetGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_attached',
+ ec2_model('NetworkInterfaceAttached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_available',
+ ec2_model('NetworkInterfaceAvailable'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_delete_on_terminate',
+ ec2_model('NetworkInterfaceDeleteOnTerminate'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
+ 'network_interface_no_delete_on_terminate',
+ ec2_model('NetworkInterfaceNoDeleteOnTerminate'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_network_interfaces
+ )),
+ ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
+ 'route_table_exists',
+ ec2_model('RouteTableExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_route_tables
+ )),
+ ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
+ 'security_group_exists',
+ ec2_model('SecurityGroupExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_security_groups
+ )),
+ ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
+ 'subnet_exists',
+ ec2_model('SubnetExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_map_public',
+ ec2_model('SubnetHasMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_map_public',
+ ec2_model('SubnetNoMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_assign_ipv6',
+ ec2_model('SubnetHasAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_assign_ipv6',
+ ec2_model('SubnetNoAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
+ 'subnet_deleted',
+ ec2_model('SubnetDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_exists',
+ ec2_model('VpnGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_detached',
+ ec2_model('VpnGatewayDetached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
+ 'cluster_active',
+ eks_model('ClusterActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
+ 'cluster_deleted',
+ eks_model('ClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+ 'db_instance_stopped',
+ rds_model('DBInstanceStopped'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+}
+
+
+def get_waiter(client, waiter_name):
+ if isinstance(client, aws_core._RetryingBotoClientWrapper):
+ return get_waiter(client.client, waiter_name)
+ try:
+ return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+ except KeyError:
+ raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+ waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
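+
+# A minimal usage sketch (hypothetical boto3 EC2 client and subnet id):
+#
+#     waiter = get_waiter(ec2_client, 'subnet_exists')
+#     waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])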
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py
new file mode 100644
index 00000000..42f12323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS.
+version_added: 1.0.0
+description:
+ - Gather information about availability zones in AWS.
+ - This module was called M(amazon.aws.aws_az_facts) before Ansible 2.9. The usage did not change.
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all availability zones
+ amazon.aws.aws_az_info:
+
+- name: Gather information about a single availability zone
+ amazon.aws.aws_az_info:
+ filters:
+ zone-name: eu-west-1a
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+    Availability zones that match the provided filters. Each element consists of a dict with all the information
+    related to that availability zone.
+ type: list
+ sample: "[
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1b'
+ },
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1c'
+ }
+ ]"
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_az_facts':
+ module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", date='2022-06-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
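+    # For example, a (hypothetical) filters value of {'zone_name': 'eu-west-1a'}
+    # becomes {'zone-name': 'eu-west-1a'} before being passed to the API.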
+
+ try:
+ availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py
new file mode 100644
index 00000000..42f12323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS.
+version_added: 1.0.0
+description:
+ - Gather information about availability zones in AWS.
+ - This module was called M(amazon.aws.aws_az_facts) before Ansible 2.9. The usage did not change.
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply.
+ - Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters.
+ - Filter names and values are case sensitive.
+ - You can use underscores instead of dashes (-) in the filter keys.
+ - Filter keys with underscores will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all availability zones
+ amazon.aws.aws_az_info:
+
+- name: Gather information about a single availability zone
+ amazon.aws.aws_az_info:
+ filters:
+ zone-name: eu-west-1a
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+    Availability zones that match the provided filters. Each element consists of a dict with all the information
+    related to that availability zone.
+ type: list
+ sample: "[
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1b'
+ },
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1c'
+ }
+ ]"
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'aws_az_facts':
+ module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", date='2022-06-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict(module.params.get('filters'))
+ for k in module.params.get('filters').keys():
+ if "_" in k:
+ sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+ del sanitized_filters[k]
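+    # For example, a (hypothetical) filters value of {'zone_name': 'eu-west-1a'}
+    # becomes {'zone-name': 'eu-west-1a'} before being passed to the API.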
+
+ try:
+ availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py
new file mode 100644
index 00000000..91880fdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_facts.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_caller_info
+version_added: 1.0.0
+short_description: Get information about the user and account being used to make AWS calls.
+description:
+ - This module returns information about the account and user / role from which the AWS access tokens originate.
+ - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
+ - This module was called M(amazon.aws.aws_caller_facts) before Ansible 2.9. The usage did not change.
+
+author:
+ - Ed Costello (@orthanc)
+ - Stijn Dubrul (@sdubrul)
+
+requirements: [ 'botocore', 'boto3' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get the current caller identity information
+ amazon.aws.aws_caller_info:
+ register: caller_info
+'''
+
+RETURN = '''
+account:
+ description: The account id the access credentials are associated with.
+ returned: success
+ type: str
+ sample: "123456789012"
+account_alias:
+ description: The account alias the access credentials are associated with.
+ returned: when caller has the iam:ListAccountAliases permission
+ type: str
+ sample: "acme-production"
+arn:
+ description: The arn identifying the user the credentials are associated with.
+ returned: success
+ type: str
+ sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
+user_id:
+ description: |
+ The user id the access credentials are associated with. Note that this may not correspond to
+ anything you can look up in the case of roles or federated identities.
+ returned: success
+ type: str
+ sample: 123456789012:my-federated-user-name
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+ if module._name == 'aws_caller_facts':
+ module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ caller_info = client.get_caller_identity(aws_retry=True)
+ caller_info.pop('ResponseMetadata', None)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve caller identity')
+
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ # Although a list is returned by list_account_aliases AWS supports maximum one alias per account.
+ # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias.
+ # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output
+ response = iam_client.list_account_aliases(aws_retry=True)
+ if response and response['AccountAliases']:
+ caller_info['account_alias'] = response['AccountAliases'][0]
+ else:
+ caller_info['account_alias'] = ''
+    except (BotoCoreError, ClientError):
+ # The iam:ListAccountAliases permission is required for this operation to succeed.
+ # Lacking this permission is handled gracefully by not returning the account_alias.
+ pass
+
+ module.exit_json(
+ changed=False,
+ **camel_dict_to_snake_dict(caller_info))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
new file mode 100644
index 00000000..91880fdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_caller_info
+version_added: 1.0.0
+short_description: Get information about the user and account being used to make AWS calls.
+description:
+ - This module returns information about the account and user / role from which the AWS access tokens originate.
+ - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
+ - This module was called M(amazon.aws.aws_caller_facts) before Ansible 2.9. The usage did not change.
+
+author:
+ - Ed Costello (@orthanc)
+ - Stijn Dubrul (@sdubrul)
+
+requirements: [ 'botocore', 'boto3' ]
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get the current caller identity information
+ amazon.aws.aws_caller_info:
+ register: caller_info
+'''
+
+RETURN = '''
+account:
+ description: The account id the access credentials are associated with.
+ returned: success
+ type: str
+ sample: "123456789012"
+account_alias:
+ description: The account alias the access credentials are associated with.
+ returned: when caller has the iam:ListAccountAliases permission
+ type: str
+ sample: "acme-production"
+arn:
+ description: The arn identifying the user the credentials are associated with.
+ returned: success
+ type: str
+ sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
+user_id:
+ description: |
+ The user id the access credentials are associated with. Note that this may not correspond to
+ anything you can look up in the case of roles or federated identities.
+ returned: success
+ type: str
+ sample: 123456789012:my-federated-user-name
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+ if module._name == 'aws_caller_facts':
+ module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ caller_info = client.get_caller_identity(aws_retry=True)
+ caller_info.pop('ResponseMetadata', None)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to retrieve caller identity')
+
+ iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ try:
+ # Although a list is returned by list_account_aliases AWS supports maximum one alias per account.
+ # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias.
+ # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output
+ response = iam_client.list_account_aliases(aws_retry=True)
+ if response and response['AccountAliases']:
+ caller_info['account_alias'] = response['AccountAliases'][0]
+ else:
+ caller_info['account_alias'] = ''
+    except (BotoCoreError, ClientError):
+ # The iam:ListAccountAliases permission is required for this operation to succeed.
+ # Lacking this permission is handled gracefully by not returning the account_alias.
+ pass
+
+ module.exit_json(
+ changed=False,
+ **camel_dict_to_snake_dict(caller_info))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py
new file mode 100644
index 00000000..eb6d8b90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/aws_s3.py
@@ -0,0 +1,947 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3
+version_added: 1.0.0
+short_description: Manage objects in S3.
+description:
+ - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
+ deleting both objects and buckets, retrieving objects as files or strings and generating download links.
+ This module has a dependency on boto3 and botocore.
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ dest:
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ type: path
+ encrypt:
+ description:
+ - When set for PUT mode, asks for server-side encryption.
+ default: true
+ type: bool
+ encryption_mode:
+ description:
+ - What encryption mode to use if I(encrypt=true).
+ default: AES256
+ choices:
+ - AES256
+ - aws:kms
+ type: str
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
+ default: 600
+ aliases: ['expiration']
+ type: int
+ headers:
+ description:
+ - Custom headers for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value).
+ type: dict
+ marker:
+ description:
+      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
+ type: str
+ max_keys:
+ description:
+ - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
+ default: 1000
+ type: int
+ metadata:
+ description:
+ - Metadata for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value).
+ type: dict
+ mode:
+ description:
+      - Switches the module behaviour between C(put) (upload), C(get) (download), C(geturl) (return download url, Ansible 1.3+),
+        C(getstr) (download object as string (1.3+)), C(list) (list keys, Ansible 2.0+), C(create) (bucket), C(delete) (bucket),
+        and C(delobj) (delete object, Ansible 2.0+).
+ required: true
+ choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
+ type: str
+ object:
+ description:
+ - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+ type: str
+ permission:
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
+ C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
+ C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
+ default: ['private']
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Limits the response to keys that begin with the specified prefix for list mode.
+ default: ""
+ type: str
+ version:
+ description:
+ - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+ type: str
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ - Must be a Boolean, C(always), C(never) or C(different).
+ - C(true) is the same as C(always).
+ - C(false) is equal to C(never).
+ - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag' of the object/key in S3.
+ The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
+ U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html).
+ default: 'always'
+ aliases: ['force']
+ type: str
+ retries:
+ description:
+ - On recoverable failure, how many times to retry before actually failing.
+ default: 0
+ type: int
+ aliases: ['retry']
+ s3_url:
+ description:
+      - S3 URL endpoint for use with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS.
+ aliases: [ S3_URL ]
+ type: str
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ - Requires at least botocore version 1.4.45.
+ type: bool
+ default: false
+ rgw:
+ description:
+ - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
+ default: false
+ type: bool
+ src:
+ description:
+ - The source file path when performing a PUT operation.
+ - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
+ type: path
+ content:
+ description:
+ - The content to PUT into an object.
+ - The parameter value will be treated as a string and converted to UTF-8 before sending it to S3.
+ To send binary data, use the I(content_base64) parameter instead.
+ - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
+ version_added: "1.3.0"
+ type: str
+ content_base64:
+ description:
+ - The base64-encoded binary data to PUT into an object.
+ - Use this if you need to put raw binary data, and don't forget to encode in base64.
+ - Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
+ version_added: "1.3.0"
+ type: str
+ ignore_nonexistent_bucket:
+ description:
+ - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
+ GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
+ I(ignore_nonexistent_bucket=true)."
+ type: bool
+ default: false
+ encryption_kms_key_id:
+ description:
+      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
+ type: str
+requirements: [ "boto3", "botocore" ]
+author:
+ - "Lester Wade (@lwade)"
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: PUT operation from a rendered template
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /object.yaml
+ content: "{{ lookup('template', 'templates/object.yaml.j2') }}"
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ rgw: true
+ s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ amazon.aws.aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
+'''
+
+RETURN = '''
+msg:
+ description: Message indicating the status of the operation.
+ returned: always
+ type: str
+ sample: PUT operation complete
+url:
+ description: URL of the object.
+ returned: (for put and geturl operations)
+ type: str
+ sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
+expiry:
+ description: Number of seconds the presigned url is valid for.
+ returned: (for geturl operation)
+ type: int
+ sample: 600
+contents:
+ description: Contents of the object as string.
+ returned: (for getstr operation)
+ type: str
+ sample: "Hello, world!"
+s3_keys:
+ description: List of object keys.
+ returned: (for list operation)
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+'''
+
+import mimetypes
+import os
+import io
+from ssl import SSLError
+import base64
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.basic import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.core import is_boto3_error_message
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_conn
+from ..module_utils.ec2 import get_aws_connection_info
+from ..module_utils.s3 import HAS_MD5
+from ..module_utils.s3 import calculate_etag
+from ..module_utils.s3 import calculate_etag_content
+
+IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
+
+
+class Sigv4Required(Exception):
+ pass
+
+
+def key_check(module, s3, bucket, obj, version=None, validate=True):
+ try:
+ if version:
+ s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ s3.head_object(Bucket=bucket, Key=obj)
+ except is_boto3_error_code('404'):
+ return False
+ except is_boto3_error_code('403') as e:
+ if validate is True:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+
+ return True
+
+
+def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
+ s3_etag = get_etag(s3, bucket, obj, version=version)
+ if local_file is not None:
+ local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
+ else:
+ local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version)
+
+ return s3_etag == local_etag
+
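+# A minimal usage sketch (hypothetical arguments): only download when the
+# local copy differs from the remote object:
+#
+#     if not etag_compare(module, s3, bucket, obj, local_file=dest):
+#         download_s3file(module, s3, bucket, obj, dest, retries)
+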
+
+def get_etag(s3, bucket, obj, version=None):
+ if version:
+ key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key_check = s3.head_object(Bucket=bucket, Key=obj)
+ if not key_check:
+ return None
+ return key_check['ETag']
+
+
+def bucket_check(module, s3, bucket, validate=True):
+ exists = True
+ try:
+ s3.head_bucket(Bucket=bucket)
+ except is_boto3_error_code('404'):
+ return False
+ except is_boto3_error_code('403') as e:
+ if validate is True:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ return exists
+
+
+def create_bucket(module, s3, bucket, location=None):
+ if module.check_mode:
+ module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ try:
+ if len(configuration) > 0:
+ s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
+ else:
+ s3.create_bucket(Bucket=bucket)
+ if module.params.get('permission'):
+ # Wait for the bucket to exist before setting ACLs
+ s3.get_waiter('bucket_exists').wait(Bucket=bucket)
+ for acl in module.params.get('permission'):
+ AWSRetry.jittered_backoff(
+ max_delay=120, catch_extra_error_codes=['NoSuchBucket']
+ )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+
+ if bucket:
+ return True
+
+
+def paginated_list(s3, **pagination_params):
+ pg = s3.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
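+# A minimal usage sketch (hypothetical bucket and prefix): collect every key
+# under a prefix, one page of up to 1000 keys at a time:
+#
+#     all_keys = []
+#     for page_of_keys in paginated_list(s3, Bucket='mybucket', Prefix='logs/'):
+#         all_keys.extend(page_of_keys)
+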
+
+def paginated_versioned_list_with_fallback(s3, **pagination_params):
+ try:
+ versioned_pg = s3.get_paginator('list_object_versions')
+ for page in versioned_pg.paginate(**pagination_params):
+ delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
+ current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
+ yield delete_markers + current_objects
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']):
+ for page in paginated_list(s3, **pagination_params):
+ yield [{'Key': data['Key']} for data in page]
+
+
+def list_keys(module, s3, bucket, prefix, marker, max_keys):
+ pagination_params = {'Bucket': bucket}
+ for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
+ pagination_params[param_name] = param_value
+ try:
+ keys = sum(paginated_list(s3, **pagination_params), [])
+ module.exit_json(msg="LIST operation complete", s3_keys=keys)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
+
+
+def delete_bucket(module, s3, bucket):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ exists = bucket_check(module, s3, bucket)
+ if exists is False:
+ return False
+ # if there are contents then we need to delete them before we can delete the bucket
+ for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
+ if keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
+ s3.delete_bucket(Bucket=bucket)
+ return True
+ except is_boto3_error_code('NoSuchBucket'):
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
+
+
+def delete_key(module, s3, bucket, obj):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ s3.delete_object(Bucket=bucket, Key=obj)
+ module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
+
+
+def create_dirkey(module, s3, bucket, obj, encrypt):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
+ if encrypt:
+ params['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+
+ s3.put_object(**params)
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
+
+
+def path_check(path):
+    return os.path.exists(path)
+
+
+def option_in_extra_args(option):
+ temp_option = option.replace('-', '').lower()
+
+ allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
+ 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
+ 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
+ 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
+ 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
+ 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
+ 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
+
+ if temp_option in allowed_extra_args:
+ return allowed_extra_args[temp_option]
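+
+# Illustrative behaviour of option_in_extra_args() (hypothetical inputs):
+#   option_in_extra_args('Content-Type') -> 'ContentType' (recognised ExtraArgs key)
+#   option_in_extra_args('owner') -> None (falls through to the object's Metadata dict)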
+
+
+def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ extra = {}
+ if encrypt:
+ extra['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+ if metadata:
+ extra['Metadata'] = {}
+
+ # determine object metadata and extra arguments
+ for option in metadata:
+ extra_args_option = option_in_extra_args(option)
+ if extra_args_option is not None:
+ extra[extra_args_option] = metadata[option]
+ else:
+ extra['Metadata'][option] = metadata[option]
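+ # e.g. metadata={'Content-Type': 'text/html', 'owner': 'ops'} (hypothetical values)
+ # would add {'ContentType': 'text/html', 'Metadata': {'owner': 'ops'}} to extra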
+
+ if 'ContentType' not in extra:
+ content_type = None
+ if src is not None:
+ content_type = mimetypes.guess_type(src)[0]
+ if content_type is None:
+ # s3 default content type
+ content_type = 'binary/octet-stream'
+ extra['ContentType'] = content_type
+
+ if src is not None:
+ s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ else:
+ f = io.BytesIO(content)
+ s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+ try:
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+ try:
+ url = s3.generate_presigned_url(ClientMethod='put_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to generate presigned URL")
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ # retries is the number of retries after the initial attempt; the download
+ # loop below therefore runs retries + 1 times.
+ try:
+ if version:
+ key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key = s3.get_object(Bucket=bucket, Key=obj)
+ except is_boto3_error_code(['404', '403']) as e:
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except is_boto3_error_message('require AWS Signature Version 4'):
+ raise Sigv4Required()
+ except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+ optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+ for x in range(0, retries + 1):
+ try:
+ s3.download_file(bucket, obj, dest, **optional_kwargs)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+ # otherwise, try again, this may be a transient timeout.
+ except SSLError as e: # will ClientError catch SSLError?
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="s3 download failed")
+ # otherwise, try again, this may be a transient timeout.
+
+
+def download_s3str(module, s3, bucket, obj, version=None, validate=True):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ try:
+ if version:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
+ else:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except is_boto3_error_message('require AWS Signature Version 4'):
+ raise Sigv4Required()
+ except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+
+
+def get_download_url(module, s3, bucket, obj, expiry, changed=True):
+ try:
+ url = s3.generate_presigned_url(ClientMethod='get_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while getting download url.")
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
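+
+# For illustration (hypothetical URLs):
+#   is_fakes3('fakes3://localhost:4567') -> True
+#   is_fakes3('https://s3.amazonaws.com') -> False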
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
+ if s3_url and rgw: # TODO - test this
+ rgw = urlparse(s3_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
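+ # e.g. s3_url='fakes3://localhost:4567' (hypothetical) yields endpoint 'http://localhost:4567'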
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ elif module.params['mode'] in ('get', 'getstr') and sig_4:
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ if module.params['dualstack']:
+ dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+ if 'config' in params:
+ params['config'] = params['config'].merge(dualconf)
+ else:
+ params['config'] = dualconf
+ return boto3_conn(**params)
+
+
+def main():
+ argument_spec = dict(
+ bucket=dict(required=True),
+ dest=dict(default=None, type='path'),
+ encrypt=dict(default=True, type='bool'),
+ encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
+ expiry=dict(default=600, type='int', aliases=['expiration']),
+ headers=dict(type='dict'),
+ marker=dict(default=""),
+ max_keys=dict(default=1000, type='int'),
+ metadata=dict(type='dict'),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+ object=dict(),
+ permission=dict(type='list', elements='str', default=['private']),
+ version=dict(default=None),
+ overwrite=dict(aliases=['force'], default='always'),
+ prefix=dict(default=""),
+ retries=dict(aliases=['retry'], type='int', default=0),
+ s3_url=dict(aliases=['S3_URL']),
+ dualstack=dict(default='no', type='bool'),
+ rgw=dict(default='no', type='bool'),
+ src=dict(type='path'),
+ content=dict(),
+ content_base64=dict(),
+ ignore_nonexistent_bucket=dict(default=False, type='bool'),
+ encryption_kms_key_id=dict()
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['mode', 'put', ['object']],
+ ['mode', 'get', ['dest', 'object']],
+ ['mode', 'getstr', ['object']],
+ ['mode', 'geturl', ['object']]],
+ mutually_exclusive=[['content', 'content_base64', 'src']],
+ )
+
+ bucket = module.params.get('bucket')
+ encrypt = module.params.get('encrypt')
+ expiry = module.params.get('expiry')
+ dest = module.params.get('dest', '')
+ headers = module.params.get('headers')
+ marker = module.params.get('marker')
+ max_keys = module.params.get('max_keys')
+ metadata = module.params.get('metadata')
+ mode = module.params.get('mode')
+ obj = module.params.get('object')
+ version = module.params.get('version')
+ overwrite = module.params.get('overwrite')
+ prefix = module.params.get('prefix')
+ retries = module.params.get('retries')
+ s3_url = module.params.get('s3_url')
+ dualstack = module.params.get('dualstack')
+ rgw = module.params.get('rgw')
+ src = module.params.get('src')
+ content = module.params.get('content')
+ content_base64 = module.params.get('content_base64')
+ ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
+
+ object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
+ bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
+
+ if overwrite not in ['always', 'never', 'different']:
+ if module.boolean(overwrite):
+ overwrite = 'always'
+ else:
+ overwrite = 'never'
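+ # i.e. legacy boolean values are normalised: overwrite=yes becomes 'always', overwrite=no becomes 'never'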
+
+ if overwrite == 'different' and not HAS_MD5:
+ module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ if module.params.get('object'):
+ obj = module.params['object']
+ # If the object starts with /, remove the leading character
+ # to maintain compatibility with Ansible versions < 2.4
+ if obj.startswith('/'):
+ obj = obj[1:]
+
+ # Bucket deletion does not require obj. Prevents ambiguity with delobj.
+ if obj and mode == "delete":
+ module.fail_json(msg='Parameter object cannot be used with mode=delete')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
+ module.fail_json(msg='dualstack only applies to AWS S3')
+
+ if dualstack and not module.botocore_at_least('1.4.45'):
+ module.fail_json(msg='dualstack requires botocore >= 1.4.45')
+
+ # rgw requires an explicit url
+ if rgw and not s3_url:
+ module.fail_json(msg='rgw flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
+
+ validate = not ignore_nonexistent_bucket
+
+ # separate types of ACLs
+ bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
+ object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
+ error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
+ if error_acl:
+ module.fail_json(msg='Unknown permission specified: %s' % error_acl)
+
+ # First, check whether the bucket exists; bucket_check returns True if it does.
+ bucketrtn = bucket_check(module, s3, bucket, validate=validate)
+
+ if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
+ module.fail_json(msg="Source bucket cannot be found.")
+
+ if mode == 'get':
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn is False:
+ if version:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if dest and path_check(dest) and overwrite != 'always':
+ if overwrite == 'never':
+ module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
+ if etag_compare(module, s3, bucket, obj, version=version, local_file=dest):
+ module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+
+ try:
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+ if mode == 'put':
+
+ # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+ # these were separated into the variables bucket_acl and object_acl above
+
+ if content is None and content_base64 is None and src is None:
+ module.fail_json(msg='Either content, content_base64 or src must be specified for PUT operations')
+ if src is not None and not path_check(src):
+ module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (src))
+
+ if bucketrtn:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ else:
+ # If the bucket doesn't exist we should create it.
+ # only use valid bucket acls for create_bucket function
+ module.params['permission'] = bucket_acl
+ create_bucket(module, s3, bucket, location)
+
+ # the content will be uploaded as a byte string, so we must encode it first
+ bincontent = None
+ if content is not None:
+ bincontent = content.encode('utf-8')
+ if content_base64 is not None:
+ bincontent = base64.standard_b64decode(content_base64)
+
+ if keyrtn and overwrite != 'always':
+ if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent):
+ # Return the download URL for the existing object
+ get_download_url(module, s3, bucket, obj, expiry, changed=False)
+
+ # only use valid object acls for the upload_s3file function
+ module.params['permission'] = object_acl
+ upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent)
+
+ # Delete an object from a bucket, not the entire bucket
+ if mode == 'delobj':
+ if obj is None:
+ module.fail_json(msg="object parameter is required")
+ if bucket:
+ deletertn = delete_key(module, s3, bucket, obj)
+ if deletertn is True:
+ module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Delete an entire bucket, including all objects in the bucket
+ if mode == 'delete':
+ if bucket:
+ deletertn = delete_bucket(module, s3, bucket)
+ if deletertn is True:
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Support for listing a set of keys
+ if mode == 'list':
+ exists = bucket_check(module, s3, bucket)
+
+ # If the bucket does not exist then bail out
+ if not exists:
+ module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+ list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+ # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+ # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
+ if mode == 'create':
+
+ # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+ # these were separated above into the variables bucket_acl and object_acl
+
+ if bucket and not obj:
+ if bucketrtn:
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ # only use valid bucket acls when creating the bucket
+ module.params['permission'] = bucket_acl
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+ if bucketrtn:
+ if key_check(module, s3, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ # setting valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+ else:
+ # only use valid bucket acls for the create_bucket function
+ module.params['permission'] = bucket_acl
+ created = create_bucket(module, s3, bucket, location)
+ # only use valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+
+ # Support for grabbing the time-expired URL for an object in S3/Walrus.
+ if mode == 'geturl':
+ if not bucket and not obj:
+ module.fail_json(msg="Bucket and Object parameters must be set")
+
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ get_download_url(module, s3, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if mode == 'getstr':
+ if bucket and obj:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ try:
+ download_s3str(module, s3, bucket, obj, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3str(module, s3, bucket, obj, version=version)
+ elif version is not None:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
new file mode 100644
index 00000000..030bfc45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
@@ -0,0 +1,808 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+version_added: 1.0.0
+short_description: Create or delete an AWS CloudFormation stack
+description:
+ - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+notes:
+ - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
+ The version listed in the requirements is the oldest version that works with the module as a whole.
+ Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
+ Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
+options:
+ stack_name:
+ description:
+ - Name of the CloudFormation stack.
+ required: true
+ type: str
+ disable_rollback:
+ description:
+ - If a stack fails to form, rollback will remove the stack.
+ default: false
+ type: bool
+ on_create_failure:
+ description:
+ - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+ choices:
+ - DO_NOTHING
+ - ROLLBACK
+ - DELETE
+ type: str
+ create_timeout:
+ description:
+ - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+ type: int
+ template_parameters:
+ description:
+ - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created.
+ - If I(state=present) and if stack exists and template has changed, it will be updated.
+ - If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template),
+ I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ type: path
+ notification_arns:
+ description:
+ - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+ type: str
+ stack_policy:
+ description:
+ - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+ For instance, a policy allowing all updates is shown at U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later; updating tags removes previous entries.
+ type: dict
+ template_url:
+ description:
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+ S3 bucket in the same region as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+ the previous template will be reused.
+ type: str
+ create_changeset:
+ description:
+ - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+ - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
+ deleted immediately with no changeset."
+ type: bool
+ default: false
+ changeset_name:
+ description:
+ - Name given to the changeset when creating a changeset.
+ - Only used when I(create_changeset=true).
+ - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+ See the AWS Change Sets docs for more information
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+ type: str
+ template_format:
+ description:
+ - This parameter is ignored since Ansible 2.3 and will be removed after 2022-06-01.
+ - Templates are now passed raw to CloudFormation regardless of format.
+ type: str
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ type: str
+ termination_protection:
+ description:
+ - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
+ type: bool
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ type: str
+ events_limit:
+ description:
+ - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+ default: 200
+ type: int
+ backoff_delay:
+ description:
+ - Number of seconds to wait for the next retry.
+ default: 3
+ type: int
+ required: False
+ backoff_max_delay:
+ description:
+ - Maximum amount of time to wait between retries.
+ default: 30
+ type: int
+ required: False
+ backoff_retries:
+ description:
+ - Number of times to retry operation.
+ - The AWS API throttling mechanism can fail CloudFormation module calls, so we have to retry a couple of times.
+ default: 10
+ type: int
+ required: False
+ capabilities:
+ description:
+ - Specify capabilities that stack template contains.
+ - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+ type: list
+ elements: str
+ default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author: "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3, botocore>=1.5.45 ]
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ KeyName: "jmartin"
+ DiskType: "ephemeral"
+ InstanceType: "m1.small"
+ ClusterSize: 3
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "roles/cloudformation/files/cloudformation-example.json"
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation-old"
+ state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via an URL
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+ amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (default) the given value is used.
+- amazon.aws.cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ DBSnapshotIdentifier:
+ use_previous_value: True
+ value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
+ DBName:
+ use_previous_value: True
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ termination_protection: yes
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: enable termination protection during stack creation
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ create_timeout: 5
+
+# Configure rollback behaviour on the unsuccessful creation of a stack allowing
+# CloudFormation to clean up, or do nothing in the event of an unsuccessful
+# deployment
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create
+- name: create stack which will delete on creation failure
+ amazon.aws.cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+change_set_id:
+ description: The ID of the stack change set if one was created.
+ returned: I(state=present) and I(create_changeset=true)
+ type: str
+ sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import traceback
+import uuid
+from hashlib import sha1
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils._text import to_native
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto_exception
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+ '''Fetch stack events. The pre-2.3 event data was never correct; it only worked as a side effect, so the v2.3 format is different.'''
+ ret = {'events': [], 'log': []}
+
+ try:
+ pg = cfn.get_paginator(
+ 'describe_stack_events'
+ ).paginate(
+ StackName=stack_name,
+ PaginationConfig={'MaxItems': events_limit}
+ )
+ if token_filter is not None:
+ events = list(pg.search(
+ "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+ ))
+ else:
+ events = list(pg.search("StackEvents[*]"))
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ ret['log'].append('Stack does not exist.')
+ return ret
+ ret['log'].append('Unknown error: ' + str(error_msg))
+ return ret
+
+ for e in events:
+ eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+ ret['events'].append(eventline)
+
+ if e['ResourceStatus'].endswith('FAILED'):
+ failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+ ret['log'].append(failline)
+
+ return ret
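+
+# Illustrative return shape of get_stack_events() (hypothetical event):
+# {'events': ['StackEvent AWS::S3::Bucket MyBucket CREATE_COMPLETE'], 'log': []}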
+
+
+def create_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
+
+ # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
+ # 'OnFailure' only apply on creation, not update.
+ if module.params.get('on_create_failure') is not None:
+ stack_params['OnFailure'] = module.params['on_create_failure']
+ else:
+ stack_params['DisableRollback'] = module.params['disable_rollback']
+
+ if module.params.get('create_timeout') is not None:
+ stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+ if module.params.get('termination_protection') is not None:
+ if boto_supports_termination_protection(cfn):
+ stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+ else:
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+
+ try:
+ response = cfn.create_stack(**stack_params)
+ # Use stack ID to follow stack state in case of on_create_failure = DELETE
+ result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName')))
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def list_changesets(cfn, stack_name):
+ res = cfn.list_change_sets(StackName=stack_name)
+ return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template' or 'template_url' is required.")
+ if module.params['changeset_name'] is not None:
+ stack_params['ChangeSetName'] = module.params['changeset_name']
+
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ changeset_name = build_changeset_name(stack_params)
+ stack_params['ChangeSetName'] = changeset_name
+
+ # Determine if this changeset already exists
+ pending_changesets = list_changesets(cfn, stack_params['StackName'])
+ if changeset_name in pending_changesets:
+ warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+ result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
+ else:
+ cs = cfn.create_change_set(**stack_params)
+ # Make sure we don't enter an infinite loop
+ time_end = time.time() + 600
+ while time.time() < time_end:
+ try:
+ newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
+ except botocore.exceptions.BotoCoreError as err:
+ module.fail_json_aws(err)
+ if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+ time.sleep(1)
+ elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
+ cfn.delete_change_set(ChangeSetName=cs['Id'])
+ result = dict(changed=False,
+ output='The created Change Set did not contain any changes to this stack and was deleted.')
+ # a failed change set does not trigger any stack events so we just want to
+ # skip any further processing of result and just return it directly
+ return result
+ else:
+ break
+ # Let's not hog the CPU or spam the AWS API
+ time.sleep(1)
+ result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+ result['change_set_id'] = cs['Id']
+ result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+ 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+ 'NOTE that dependencies on this stack might fail due to pending changes!']
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json_aws(err, msg='Failed to create change set')
+
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ stack_params['UsePreviousTemplate'] = True
+
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack(**stack_params)
+ result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName')))
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
+ '''updates termination protection of a stack'''
+ if not boto_supports_termination_protection(cfn):
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+ stack = get_stack_facts(cfn, stack_name)
+ if stack:
+ if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
+ try:
+ cfn.update_termination_protection(
+ EnableTerminationProtection=desired_termination_protection_state,
+ StackName=stack_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+
+def boto_supports_termination_protection(cfn):
+ '''termination protection was added in botocore 1.7.18'''
+ return hasattr(cfn, "update_termination_protection")
+
+
+def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
+ '''gets the status of a stack while it is created/updated/deleted'''
+ existed = []
+ while True:
+ try:
+ stack = get_stack_facts(cfn, stack_name)
+ existed.append('yes')
+ except Exception:
+ # If the stack previously existed, and now can't be found then it's
+ # been deleted successfully.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ if not stack:
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
+ return ret
+ # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
+ # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
+ elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
+ ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
+ return ret
+ elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
+ return ret
+ # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
+ elif stack['StackStatus'].endswith('_COMPLETE'):
+ ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+ return ret
+ # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
+ elif stack['StackStatus'].endswith('_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
+ return ret
+ else:
+ # this can loop forever :/
+ time.sleep(5)
+ return {'failed': True, 'output': 'Failed for unknown reasons.'}
+
+
+def build_changeset_name(stack_params):
+ if 'ChangeSetName' in stack_params:
+ return stack_params['ChangeSetName']
+
+ json_params = json.dumps(stack_params, sort_keys=True)
+
+ return 'Ansible-{0}-{1}'.format(
+ stack_params['StackName'],
+ sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
+ )
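+
+# Illustrative result (the hash is the sha1 of the sorted JSON-encoded params):
+# build_changeset_name({'StackName': 'web'}) -> 'Ansible-web-<sha1 hex digest>'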
+
+
+def check_mode_changeset(module, stack_params, cfn):
+ """Create a change set, describe it and delete it before returning check mode outputs."""
+ stack_params['ChangeSetName'] = build_changeset_name(stack_params)
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ change_set = cfn.create_change_set(**stack_params)
+ for i in range(60): # total time 5 min
+ description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
+ if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
+ break
+ time.sleep(5)
+ else:
+ # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
+ module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
+
+ cfn.delete_change_set(ChangeSetName=change_set['Id'])
+
+ reason = description.get('StatusReason')
+
+ if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
+ return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
+ return {'changed': True, 'msg': reason, 'meta': description['Changes']}
+
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ module.fail_json_aws(err)
+
+
+def get_stack_facts(cfn, stack_name):
+ try:
+ stack_response = cfn.describe_stacks(StackName=stack_name)
+ stack_info = stack_response['Stacks'][0]
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ return None
+
+ # other error, bail.
+ raise err
+
+ if stack_response and stack_response.get('Stacks', None):
+ stacks = stack_response['Stacks']
+ if len(stacks):
+ stack_info = stacks[0]
+
+ return stack_info
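+
+# get_stack_facts() returns the stack description dict, or None when the stack
+# does not exist, e.g. (hypothetical) {'StackName': 'web', 'StackStatus': 'CREATE_COMPLETE', ...}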
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(required=True),
+ template_parameters=dict(required=False, type='dict', default={}),
+ state=dict(default='present', choices=['present', 'absent']),
+ template=dict(default=None, required=False, type='path'),
+ notification_arns=dict(default=None, required=False),
+ stack_policy=dict(default=None, required=False),
+ disable_rollback=dict(default=False, type='bool'),
+ on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
+ create_timeout=dict(default=None, type='int'),
+ template_url=dict(default=None, required=False),
+ template_body=dict(default=None, required=False),
+ template_format=dict(removed_at_date='2022-06-01', removed_from_collection='amazon.aws'),
+ create_changeset=dict(default=False, type='bool'),
+ changeset_name=dict(default=None, required=False),
+ role_arn=dict(default=None, required=False),
+ tags=dict(default=None, type='dict'),
+ termination_protection=dict(default=None, type='bool'),
+ events_limit=dict(default=200, type='int'),
+ backoff_retries=dict(type='int', default=10, required=False),
+ backoff_delay=dict(type='int', default=3, required=False),
+ backoff_max_delay=dict(type='int', default=30, required=False),
+ capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body'],
+ ['disable_rollback', 'on_create_failure']],
+ supports_check_mode=True
+ )
+
+ invalid_capabilities = []
+ user_capabilities = module.params.get('capabilities')
+ for user_cap in user_capabilities:
+ if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
+ invalid_capabilities.append(user_cap)
+
+ if invalid_capabilities:
+ module.fail_json(msg="Specified capabilities are invalid : %r,"
+ " please check documentation for valid capabilities" % invalid_capabilities)
+
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {
+ 'Capabilities': user_capabilities,
+ 'ClientRequestToken': to_native(uuid.uuid4()),
+ }
+ state = module.params['state']
+ stack_params['StackName'] = module.params['stack_name']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as template_fh:
+ stack_params['TemplateBody'] = template_fh.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+
+ if module.params.get('notification_arns'):
+ stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
+ else:
+ stack_params['NotificationARNs'] = []
+
+ # the stack policy can't be applied when verifying (check mode) or when creating a changeset
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
+ with open(module.params['stack_policy'], 'r') as stack_policy_fh:
+ stack_params['StackPolicyBody'] = stack_policy_fh.read()
+
+ template_parameters = module.params['template_parameters']
+
+ stack_params['Parameters'] = []
+ for k, v in template_parameters.items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = str(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
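+ # e.g. template_parameters={'DBName': {'use_previous_value': True}, 'ClusterSize': 3} (hypothetical)
+ # produces Parameters=[{'ParameterKey': 'DBName', 'UsePreviousValue': True},
+ # {'ParameterKey': 'ClusterSize', 'ParameterValue': '3'}]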
+
+ if isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('role_arn'):
+ stack_params['RoleARN'] = module.params['role_arn']
+
+ result = {}
+
+ cfn = module.client('cloudformation')
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
+ backoff_wrapper = AWSRetry.jittered_backoff(
+ retries=module.params.get('backoff_retries'),
+ delay=module.params.get('backoff_delay'),
+ max_delay=module.params.get('backoff_max_delay')
+ )
+ cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
+ cfn.create_stack = backoff_wrapper(cfn.create_stack)
+ cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
+ cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
+ cfn.update_stack = backoff_wrapper(cfn.update_stack)
+ cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
+ cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
+ cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
+ if boto_supports_termination_protection(cfn):
+ cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
+
+ stack_info = get_stack_facts(cfn, stack_params['StackName'])
+
+ if module.check_mode:
+ if state == 'absent' and stack_info:
+ module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
+ elif state == 'absent' and not stack_info:
+ module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
+ elif state == 'present' and not stack_info:
+ module.exit_json(changed=True, msg='New stack would be created', meta=[])
+ else:
+ module.exit_json(**check_mode_changeset(module, stack_params, cfn))
+
+ if state == 'present':
+ if not stack_info:
+ result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
+ elif module.params.get('create_changeset'):
+ result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
+ else:
+ if module.params.get('termination_protection') is not None:
+ update_termination_protection(module, cfn, stack_params['StackName'],
+ bool(module.params.get('termination_protection')))
+ result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
+
+ # format the stack output
+
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if stack is not None:
+ if result.get('stack_outputs') is None:
+ # always define stack_outputs, but it may be empty
+ result['stack_outputs'] = {}
+ for output in stack.get('Outputs', []):
+ result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+ stack_resources = []
+ reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+ for res in reslist.get('StackResourceSummaries', []):
+ stack_resources.append({
+ "logical_resource_id": res['LogicalResourceId'],
+ "physical_resource_id": res.get('PhysicalResourceId', ''),
+ "resource_type": res['ResourceType'],
+ "last_updated_time": res['LastUpdatedTimestamp'],
+ "status": res['ResourceStatus'],
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ })
+ result['stack_resources'] = stack_resources
+
+ elif state == 'absent':
+ # absent state is different because of the way delete_stack works.
+ # the problem is that it doesn't give an error if the stack isn't found,
+ # so we must describe the stack first
+
+ try:
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if not stack:
+ result = {'changed': False, 'output': 'Stack not found.'}
+ else:
+ if stack_params.get('RoleARN') is None:
+ cfn.delete_stack(StackName=stack_params['StackName'])
+ else:
+ cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+ result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+ stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ module.fail_json_aws(err)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py
new file mode 100644
index 00000000..0c34e8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_facts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+version_added: 1.0.0
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+ - This module was called C(amazon.aws.cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(amazon.aws.cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+ - Get stack change sets for the stack.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get summary information about a stack
+ amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# When the module is called as cloudformation_facts, return values are published
+# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible after 2021-12-01.
+
+- amazon.aws.cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+- debug:
+ msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- amazon.aws.cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get facts about a stack but fail if it doesn't exist
+ amazon.aws.cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: yes
+ failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: if the stack exists
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs, mapping each output's 'OutputKey' parameter to its corresponding 'OutputValue' parameter.
+ returned: if the stack exists
+ type: dict
+ sample:
+ ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
+stack_parameters:
+ description: Dictionary of stack parameters, mapping each parameter's 'ParameterKey' parameter to its corresponding 'ParameterValue' parameter.
+ returned: if the stack exists
+ type: dict
+ sample:
+ DatabaseEngine: mysql
+ DatabasePassword: "***"
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+stack_resources:
+ description: Dictionary of stack resources, mapping each resource's 'LogicalResourceId' parameter to its corresponding 'PhysicalResourceId' parameter.
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample:
+ AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
+ AutoScalingSecurityGroup: "sg-abcd1234"
+ ApplicationDatabase: "dazvlpr01xj55a"
+stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset
+
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_message
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudformation')
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stacks_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_stacks')
+ return paginator.paginate(**kwargs).build_full_result()['Stacks']
+
+ def describe_stacks(self, stack_name=None):
+ try:
+ kwargs = {'StackName': stack_name} if stack_name else {}
+ response = self.describe_stacks_with_backoff(**kwargs)
+ if response is not None:
+ return response
+ self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
+ except is_boto3_error_message('does not exist'):
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_resources_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_stack_resources')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
+
+ def list_stack_resources(self, stack_name):
+ try:
+ return self.list_stack_resources_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_events_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('describe_stack_events')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
+
+ def describe_stack_events(self, stack_name):
+ try:
+ return self.describe_stack_events_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_change_sets_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_change_sets')
+ return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_change_set_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_change_set')
+ return paginator.paginate(**kwargs).build_full_result()
+
+ def describe_stack_change_sets(self, stack_name):
+ changes = []
+ try:
+ change_sets = self.list_stack_change_sets_with_backoff(stack_name)
+ for item in change_sets:
+ changes.append(self.describe_stack_change_set_with_backoff(
+ StackName=stack_name,
+ ChangeSetName=item['ChangeSetName']))
+ return changes
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_stack_policy_with_backoff(self, stack_name):
+ return self.client.get_stack_policy(StackName=stack_name)
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.get_stack_policy_with_backoff(stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_template_with_backoff(self, stack_name):
+ return self.client.get_template(StackName=stack_name)
+
+ def get_template(self, stack_name):
+ try:
+ response = self.get_template_with_backoff(stack_name)
+ return response.get('TemplateBody')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
+ if items:
+ return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
+ else:
+ return dict()
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ stack_change_sets=dict(required=False, default=False, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ is_old_facts = module._name == 'cloudformation_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='amazon.aws')
+
+ service_mgr = CloudFormationServiceManager(module)
+
+ if is_old_facts:
+ result = {'ansible_facts': {'cloudformation': {}}}
+ else:
+ result = {'cloudformation': {}}
+
+ for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
+ facts = {'stack_description': stack_description}
+ stack_name = stack_description.get('StackName')
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
+ 'ParameterKey', 'ParameterValue')
+ facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
+ 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+ if all_facts or module.params.get('stack_change_sets'):
+ facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
+
+ if is_old_facts:
+ result['ansible_facts']['cloudformation'][stack_name] = facts
+ else:
+ result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+ 'stack_parameters',
+ 'stack_policy',
+ 'stack_resources',
+ 'stack_tags',
+ 'stack_template'))
+
+ module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
new file mode 100644
index 00000000..0c34e8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+version_added: 1.0.0
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+ - This module was called C(amazon.aws.cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(amazon.aws.cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+ - Get stack change sets for the stack.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Get summary information about a stack
+ amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# When the module is called as cloudformation_facts, return values are published
+# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible after 2021-12-01.
+
+- amazon.aws.cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+- debug:
+ msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- amazon.aws.cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- amazon.aws.cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: Try to get facts about a stack but fail if it doesn't exist
+ amazon.aws.cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: true
+ failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: if the stack exists
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs, mapping each output's 'OutputKey' to its 'OutputValue'
+ returned: if the stack exists
+ type: dict
+ sample:
+ ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
+stack_parameters:
+ description: Dictionary of stack parameters, mapping each parameter's 'ParameterKey' to its 'ParameterValue'
+ returned: if the stack exists
+ type: dict
+ sample:
+ DatabaseEngine: mysql
+ DatabasePassword: "***"
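+stack_tags:
+ description: Dictionary of the stack's tags, keyed by tag name (the module sets this key from the stack description's tag list)
+ returned: if the stack exists
+ type: dict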
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+stack_resources:
+ description: Dictionary of stack resources, mapping each resource's 'LogicalResourceId' to its 'PhysicalResourceId'
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample:
+ AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
+ AutoScalingSecurityGroup: "sg-abcd1234"
+ ApplicationDatabase: "dazvlpr01xj55a"
+stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset.
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+'''
+
+import json
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_message
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudformation')
+
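+ # Each *_with_backoff helper below wraps a boto3 paginator or API call in
+ # AWSRetry.exponential_backoff, so transient errors such as API throttling
+ # are retried up to 5 times, starting with a 5-second delay.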
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stacks_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_stacks')
+ return paginator.paginate(**kwargs).build_full_result()['Stacks']
+
+ def describe_stacks(self, stack_name=None):
+ try:
+ kwargs = {'StackName': stack_name} if stack_name else {}
+ response = self.describe_stacks_with_backoff(**kwargs)
+ if response is not None:
+ return response
+ self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
+ except is_boto3_error_message('does not exist'):
+ return {}
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_resources_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_stack_resources')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
+
+ def list_stack_resources(self, stack_name):
+ try:
+ return self.list_stack_resources_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_events_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('describe_stack_events')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
+
+ def describe_stack_events(self, stack_name):
+ try:
+ return self.describe_stack_events_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_change_sets_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_change_sets')
+ return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_change_set_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_change_set')
+ return paginator.paginate(**kwargs).build_full_result()
+
+ def describe_stack_change_sets(self, stack_name):
+ changes = []
+ try:
+ change_sets = self.list_stack_change_sets_with_backoff(stack_name)
+ for item in change_sets:
+ changes.append(self.describe_stack_change_set_with_backoff(
+ StackName=stack_name,
+ ChangeSetName=item['ChangeSetName']))
+ return changes
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_stack_policy_with_backoff(self, stack_name):
+ return self.client.get_stack_policy(StackName=stack_name)
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.get_stack_policy_with_backoff(stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_template_with_backoff(self, stack_name):
+ return self.client.get_template(StackName=stack_name)
+
+ def get_template(self, stack_name):
+ try:
+ response = self.get_template_with_backoff(stack_name)
+ return response.get('TemplateBody')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
+ if items:
+ return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
+ else:
+ return dict()
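+# Illustrative example (assumed data, not part of the module): given a stack's
+# Outputs list, to_dict([{'OutputKey': 'Endpoint', 'OutputValue': 'db.example.com'}],
+# 'OutputKey', 'OutputValue') returns {'Endpoint': 'db.example.com'}.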
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ stack_change_sets=dict(required=False, default=False, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ is_old_facts = module._name == 'cloudformation_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
+ "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='amazon.aws')
+
+ service_mgr = CloudFormationServiceManager(module)
+
+ if is_old_facts:
+ result = {'ansible_facts': {'cloudformation': {}}}
+ else:
+ result = {'cloudformation': {}}
+
+ for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
+ facts = {'stack_description': stack_description}
+ stack_name = stack_description.get('StackName')
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
+ 'ParameterKey', 'ParameterValue')
+ facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
+ 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+ if all_facts or module.params.get('stack_change_sets'):
+ facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
+
+ if is_old_facts:
+ result['ansible_facts']['cloudformation'][stack_name] = facts
+ else:
+ result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+ 'stack_parameters',
+ 'stack_policy',
+ 'stack_resources',
+ 'stack_tags',
+ 'stack_template'))
+
+ module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py
new file mode 100644
index 00000000..990a7e69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2.py
@@ -0,0 +1,1740 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2
+version_added: 1.0.0
+short_description: create, terminate, start or stop an instance in ec2
+description:
+ - Creates or terminates ec2 instances.
+ - >
+ Note: This module uses the older boto Python module to interact with the EC2 API.
+ M(amazon.aws.ec2) will still receive bug fixes, but no new features.
+ Consider using the M(amazon.aws.ec2_instance) module instead.
+ If M(amazon.aws.ec2_instance) does not support a feature you need that is available in M(amazon.aws.ec2), please
+ file a feature request.
+options:
+ key_name:
+ description:
+ - Key pair to use on the instance.
+ - The SSH key must already exist in AWS in order to use this argument.
+ - Keys can be created / deleted using the M(amazon.aws.ec2_key) module.
+ aliases: ['keypair']
+ type: str
+ id:
+ description:
+ - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
+ - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
+ - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ type: str
+ group:
+ description:
+ - Security group (or list of groups) to use with the instance.
+ aliases: [ 'groups' ]
+ type: list
+ elements: str
+ group_id:
+ description:
+ - Security group id (or list of ids) to use with the instance.
+ type: list
+ elements: str
+ zone:
+ description:
+ - AWS availability zone in which to launch the instance.
+ aliases: [ 'aws_zone', 'ec2_zone' ]
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Required when creating a new instance.
+ type: str
+ aliases: ['type']
+ tenancy:
+ description:
+ - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
+ - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
+ - Dedicated tenancy is not available for EC2 "micro" instances.
+ default: default
+ choices: [ "default", "dedicated" ]
+ type: str
+ spot_price:
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+ - A spot request is made with this maximum bid. When it is filled, the instance is started.
+ type: str
+ spot_type:
+ description:
+ - The type of spot request.
+ - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again.
+ default: "one-time"
+ choices: [ "one-time", "persistent" ]
+ type: str
+ image:
+ description:
+ - I(ami) ID to use for the instance.
+ - Required when I(state=present).
+ type: str
+ kernel:
+ description:
+ - Kernel eki to use for the instance.
+ type: str
+ ramdisk:
+ description:
+ - Ramdisk eri to use for the instance.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Does not wait for SSH, see the 'wait_for_connection' example for details.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ spot_wait_timeout:
+ description:
+ - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
+ default: 600
+ type: int
+ count:
+ description:
+ - Number of instances to launch.
+ default: 1
+ type: int
+ monitoring:
+ description:
+ - Enable detailed monitoring (CloudWatch) for the instance.
+ type: bool
+ default: false
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the EC2 instance.
+ type: str
+ instance_tags:
+ description:
+ - >
+ A hash/dictionary of tags to add to the new instance or for
+ instances to start/stop by tag. For example C({"key":"value"}) or
+ C({"key":"value","key2":"value2"}).
+ type: dict
+ placement_group:
+ description:
+ - Placement group for the instance when using EC2 Clustered Compute.
+ type: str
+ vpc_subnet_id:
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ type: str
+ assign_public_ip:
+ description:
+ - When provisioning within a VPC, assign a public IP address. The Boto library must be 2.13.0+.
+ type: bool
+ private_ip:
+ description:
+ - The private IP address to assign to the instance (from the VPC subnet).
+ type: str
+ instance_profile_name:
+ description:
+ - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
+ type: str
+ instance_ids:
+ description:
+ - "list of instance ids, currently used for states: absent, running, stopped"
+ aliases: ['instance_id']
+ type: list
+ elements: str
+ source_dest_check:
+ description:
+ - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
+ When initially creating an instance the EC2 API defaults this to C(True).
+ type: bool
+ termination_protection:
+ description:
+ - Enable or Disable the Termination Protection.
+ - Defaults to C(false).
+ type: bool
+ instance_initiated_shutdown_behavior:
+ description:
+ - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
+ images (which require termination on shutdown).
+ default: 'stop'
+ choices: [ "stop", "terminate" ]
+ type: str
+ state:
+ description:
+ - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
+ - When I(state=absent), I(instance_ids) is required.
+ - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
+ default: 'present'
+ choices: ['absent', 'present', 'restarted', 'running', 'stopped']
+ type: str
+ volumes:
+ description:
+ - A list of hash/dictionaries of volumes to add to the new instance.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ required: true
+ description:
+ - A name for the device (For example C(/dev/sda)).
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance is terminated.
+ ephemeral:
+ type: str
+ description:
+ - Name of the ephemeral (instance store) volume to map (for example C(ephemeral0)).
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ ebs_optimized:
+ description:
+ - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ default: false
+ type: bool
+ exact_count:
+ description:
+ - An integer value which indicates how many instances matching the 'count_tag' parameter should be running.
+ Instances are either created or terminated based on this value.
+ type: int
+ count_tag:
+ description:
+ - Used with I(exact_count) to determine how many nodes matching a specific tag criterion should be running.
+ This can be expressed in multiple ways, as shown in the EXAMPLES section. For instance, one can request 25 servers
+ that are tagged with C(class=webserver). The specified tag must already exist or be passed in as the I(instance_tags) option.
+ type: raw
+ network_interfaces:
+ description:
+ - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
+ none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
+ for creating a new network interface at launch.)
+ aliases: ['network_interface']
+ type: list
+ elements: str
+ spot_launch_group:
+ description:
+ - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
+ type: str
+author:
+ - "Tim Gerla (@tgerla)"
+ - "Lester Wade (@lwade)"
+ - "Seth Vidal (@skvidal)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic provisioning example
+- amazon.aws.ec2:
+ key_name: mykey
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ group: webserver
+ count: 3
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Advanced example with tagging and CloudWatch
+- amazon.aws.ec2:
+ key_name: mykey
+ group: databases
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with additional IOPS volume from snapshot and volume delete on termination
+- amazon.aws.ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_type: io1
+ iops: 1000
+ volume_size: 100
+ delete_on_termination: true
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with ssd gp2 root volume
+- amazon.aws.ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ volume_size: 8
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ count_tag:
+ Name: dbserver
+ exact_count: 1
+
+# Multiple groups example
+- amazon.aws.ec2:
+ key_name: mykey
+ group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Multiple instances with additional volume from snapshot
+- amazon.aws.ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_size: 10
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Dedicated tenancy example
+- amazon.aws.ec2:
+ assign_public_ip: yes
+ group_id: sg-1dc53f72
+ key_name: mykey
+ image: ami-6e649707
+ instance_type: m1.small
+ tenancy: dedicated
+ vpc_subnet_id: subnet-29e63245
+ wait: yes
+
+# Spot instance example
+- amazon.aws.ec2:
+ spot_price: 0.24
+ spot_wait_timeout: 600
+ keypair: mykey
+ group_id: sg-1dc53f72
+ instance_type: m1.small
+ image: ami-6e649707
+ wait: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ spot_launch_group: report_generators
+ instance_initiated_shutdown_behavior: terminate
+
+# Examples using pre-existing network interfaces
+- amazon.aws.ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interface: eni-deadbeef
+
+- amazon.aws.ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
+
+# Launch instances, run some tasks
+# and then terminate them
+
+- name: Create a sandbox instance
+ hosts: localhost
+ gather_facts: False
+ vars:
+ keypair: my_keypair
+ instance_type: m1.small
+ security_group: my_securitygroup
+ image: my_ami_id
+ region: us-east-1
+ tasks:
+ - name: Launch instance
+ amazon.aws.ec2:
+ key_name: "{{ keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ instance_type }}"
+ image: "{{ image }}"
+ wait: true
+ region: "{{ region }}"
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ register: ec2
+
+ - name: Add new instance to host group
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: launched
+ loop: "{{ ec2.instances }}"
+
+ - name: Wait for SSH to come up
+ delegate_to: "{{ item.public_dns_name }}"
+ wait_for_connection:
+ delay: 60
+ timeout: 320
+ loop: "{{ ec2.instances }}"
+
+- name: Configure instance(s)
+ hosts: launched
+ become: True
+ gather_facts: True
+ roles:
+ - my_awesome_role
+ - my_awesome_test
+
+- name: Terminate instances
+ hosts: localhost
+ tasks:
+ - name: Terminate instances that were previously launched
+ amazon.aws.ec2:
+ state: 'absent'
+ instance_ids: '{{ ec2.instance_ids }}'
+
+# Start a few existing instances, run some tasks
+# and stop the instances
+
+- name: Start sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Start the sandbox instances
+ amazon.aws.ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: running
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ roles:
+ - do_neat_stuff
+ - do_more_neat_stuff
+
+- name: Stop sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Stop the sandbox instances
+ amazon.aws.ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: stopped
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Start stopped instances specified by tag
+#
+- amazon.aws.ec2:
+ instance_tags:
+ Name: ExtraPower
+ state: running
+
+#
+# Restart instances specified by tag
+#
+- amazon.aws.ec2:
+ instance_tags:
+ Name: ExtraPower
+ state: restarted
+
+#
+# Enforce that 5 instances with a tag "foo" are running
+# (Highly recommended!)
+#
+
+- amazon.aws.ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ foo: bar
+ exact_count: 5
+ count_tag: foo
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
+#
+
+- amazon.aws.ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ Name: database
+ dbtype: postgres
+ exact_count: 5
+ count_tag:
+ Name: database
+ dbtype: postgres
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# count_tag complex argument examples
+#
+
+ # instances with tag foo
+- amazon.aws.ec2:
+ count_tag:
+ foo:
+
+ # instances with tag foo=bar
+- amazon.aws.ec2:
+ count_tag:
+ foo: bar
+
+ # instances with tags foo=bar & baz
+- amazon.aws.ec2:
+ count_tag:
+ foo: bar
+ baz:
+
+ # instances with tags foo & bar & baz=bang
+- amazon.aws.ec2:
+ count_tag:
+ - foo
+ - bar
+ - baz: bang
+
+'''
+
+import time
+import datetime
+from ast import literal_eval
+from distutils.version import LooseVersion
+
+try:
+ import boto.ec2
+ from boto.ec2.blockdevicemapping import BlockDeviceType
+ from boto.ec2.blockdevicemapping import BlockDeviceMapping
+ from boto.exception import EC2ResponseError
+ from boto import connect_ec2_endpoint
+ from boto import connect_vpc
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ansible.module_utils.six import get_function_code
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils._text import to_text
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import ec2_connect
+from ..module_utils.ec2 import get_aws_connection_info
+
+
+def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
+
+ # get reservations for instances that match tag(s) and are in the desired state
+ state = module.params.get('state')
+ if state not in ['running', 'stopped']:
+ state = None
+ reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
+
+ instances = []
+ for res in reservations:
+ if hasattr(res, 'instances'):
+ for inst in res.instances:
+ if inst.state == 'terminated' or inst.state == 'shutting-down':
+ continue
+ instances.append(inst)
+
+ return reservations, instances
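+# Note: instances in the 'terminated' or 'shutting-down' states are skipped above,
+# so the exact_count logic only counts instances that are not already going away.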
+
+
+def _set_none_to_blank(dictionary):
+ result = dictionary
+ for k in result:
+ if isinstance(result[k], dict):
+ result[k] = _set_none_to_blank(result[k])
+ elif not result[k]:
+ result[k] = ""
+ return result
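+# Illustrative example (assumed input): _set_none_to_blank({'foo': None, 'env': {'tier': None}})
+# returns {'foo': '', 'env': {'tier': ''}}, so a tag supplied without a value still
+# produces a valid 'tag:<name>=' filter in get_reservations below.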
+
+
+def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
+ # TODO: filters do not work with tags that have underscores
+ filters = dict()
+
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ vpc_id = None
+ if vpc_subnet_id:
+ filters.update({"subnet-id": vpc_subnet_id})
+ if vpc:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+
+ if vpc_id:
+ filters.update({"vpc-id": vpc_id})
+
+ if tags is not None:
+
+ if isinstance(tags, str):
+ try:
+ tags = literal_eval(tags)
+ except Exception:
+ pass
+
+ # if not a string type, convert and make sure it's a text string
+ if isinstance(tags, int):
+ tags = to_text(tags)
+
+ # if string, we only care that a tag of that name exists
+ if isinstance(tags, str):
+ filters.update({"tag-key": tags})
+
+ # if list, append each item to filters
+ if isinstance(tags, list):
+ for x in tags:
+ if isinstance(x, dict):
+ x = _set_none_to_blank(x)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
+ else:
+ filters.update({"tag-key": x})
+
+ # if dict, add the key and value to the filter
+ if isinstance(tags, dict):
+ tags = _set_none_to_blank(tags)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
+
+ # let's check to see if the filters dict is empty; if so, then stop
+ if not filters:
+ module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
+
+ if state:
+ # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
+ filters.update({'instance-state-name': state})
+
+ if zone:
+ filters.update({'availability-zone': zone})
+
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ results = ec2.get_all_instances(filters=filters)
+
+ return results
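+# Illustrative example (assumed arguments, with vpc_subnet_id and the 'id' parameter
+# unset): tags={'Name': 'db', 'env': None} and zone='us-east-1a' build the filters
+# {'tag:Name': 'db', 'tag:env': '', 'availability-zone': 'us-east-1a'}.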
+
+
+def get_instance_info(inst):
+ """
+ Retrieves instance information from a boto instance
+ object and returns it as a dictionary
+ """
+ instance_info = {'id': inst.id,
+ 'ami_launch_index': inst.ami_launch_index,
+ 'private_ip': inst.private_ip_address,
+ 'private_dns_name': inst.private_dns_name,
+ 'public_ip': inst.ip_address,
+ 'dns_name': inst.dns_name,
+ 'public_dns_name': inst.public_dns_name,
+ 'state_code': inst.state_code,
+ 'architecture': inst.architecture,
+ 'image_id': inst.image_id,
+ 'key_name': inst.key_name,
+ 'placement': inst.placement,
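+ # 'placement' is the availability zone (for example 'us-east-1a');
+ # stripping its final letter yields the region name used on the next line.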
+ 'region': inst.placement[:-1],
+ 'kernel': inst.kernel,
+ 'ramdisk': inst.ramdisk,
+ 'launch_time': inst.launch_time,
+ 'instance_type': inst.instance_type,
+ 'root_device_type': inst.root_device_type,
+ 'root_device_name': inst.root_device_name,
+ 'state': inst.state,
+ 'hypervisor': inst.hypervisor,
+ 'tags': inst.tags,
+ 'groups': dict((group.id, group.name) for group in inst.groups),
+ }
+ try:
+ instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
+ except AttributeError:
+ instance_info['virtualization_type'] = None
+
+ try:
+ instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
+ except AttributeError:
+ instance_info['ebs_optimized'] = False
+
+ try:
+ bdm_dict = {}
+ bdm = getattr(inst, 'block_device_mapping')
+ for device_name in bdm.keys():
+ bdm_dict[device_name] = {
+ 'status': bdm[device_name].status,
+ 'volume_id': bdm[device_name].volume_id,
+ 'delete_on_termination': bdm[device_name].delete_on_termination
+ }
+ instance_info['block_device_mapping'] = bdm_dict
+ except AttributeError:
+ instance_info['block_device_mapping'] = False
+
+ try:
+ instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
+ except AttributeError:
+ instance_info['tenancy'] = 'default'
+
+ return instance_info
+
+
+def boto_supports_associate_public_ip_address(ec2):
+ """
+ Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
+ class. Added in Boto 2.13.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts associate_public_ip_address argument, else False
+ """
+
+ try:
+ network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
+ getattr(network_interface, "associate_public_ip_address")
+ return True
+ except AttributeError:
+ return False
+
+
+def boto_supports_profile_name_arg(ec2):
+ """
+ Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts instance_profile_name argument, else False
+ """
+ run_instances_method = getattr(ec2, 'run_instances')
+ return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
+
+
+def boto_supports_volume_encryption():
+ """
+ Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
+
+ Returns:
+ True if the installed boto version supports EBS volume encryption (2.29.0 or later), else False
+ """
+ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
+def create_block_device(module, ec2, volume):
+ # Not aware of a way to determine this programmatically
+ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+ MAX_IOPS_TO_SIZE_RATIO = 30
+
+ volume_type = volume.get('volume_type')
+
+ if 'snapshot' not in volume and 'ephemeral' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume_type == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'iops' in volume:
+ snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
+ size = volume.get('volume_size', snapshot.volume_size)
+ if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * int(size):
+ module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ if boto_supports_volume_encryption():
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'),
+ encrypted=volume.get('encrypted', None))
+ else:
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'))
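+# Illustrative example (assumed volume spec): {'device_name': '/dev/sdb', 'volume_type': 'io1',
+# 'iops': 1000, 'volume_size': 100} passes the checks above and yields a BlockDeviceType
+# with size=100, volume_type='io1', iops=1000 and delete_on_termination=False (the default).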
+
+
+def boto_supports_param_in_spot_request(ec2, param):
+ """
+ Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if boto library has the named param as an argument on the request_spot_instances method, else False
+ """
+ method = getattr(ec2, 'request_spot_instances')
+ return param in get_function_code(method).co_varnames
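+# Background note: get_function_code(f).co_varnames lists a function's parameter and
+# local variable names, which is how the boto_supports_* helpers probe for optional
+# keyword arguments instead of requiring a specific boto release.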
+
+
+def await_spot_requests(module, ec2, spot_requests, count):
+ """
+ Wait for a group of spot requests to be fulfilled, or fail.
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ spot_requests: list of boto.ec2.spotinstancerequest.SpotInstanceRequest objects returned by ec2.request_spot_instances
+ count: Total number of instances to be created by the spot requests
+
+ Returns:
+ list of instance IDs created by the spot request(s)
+ """
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ wait_complete = time.time() + spot_wait_timeout
+
+ spot_req_inst_ids = dict()
+ while time.time() < wait_complete:
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in spot_requests:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id != sirb.id:
+ continue # this is not our spot instance
+ if sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ elif sir.state == 'open':
+ continue # still waiting, nothing to do here
+ elif sir.state == 'active':
+ continue # Instance is created already, nothing to do here
+ elif sir.state == 'failed':
+ module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+ sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+ elif sir.state == 'cancelled':
+ module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+ elif sir.state == 'closed':
+ # instance is terminating or marked for termination
+ # this may be intentional on the part of the operator,
+ # or it may have been terminated by AWS due to capacity,
+ # price, or group constraints. In this case, we'll fail
+ # the module if the reason for the state is anything
+ # other than termination by user. Codes are documented at
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
+ if sir.status.code == 'instance-terminated-by-user':
+ # do nothing, since the user likely did this on purpose
+ pass
+ else:
+ spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
+ module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ return list(spot_req_inst_ids.values())
+ module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
+
+
+def enforce_count(module, ec2, vpc):
+
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ zone = module.params.get('zone')
+
+ # fail here if the exact count was specified without filtering
+ # on a tag, as this may lead to an undesired removal of instances
+ if exact_count and count_tag is None:
+ module.fail_json(msg="you must use the 'count_tag' option with exact_count")
+
+ reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
+
+ changed = None
+ checkmode = False
+ instance_dict_array = []
+ changed_instance_ids = None
+
+ if len(instances) == exact_count:
+ changed = False
+ elif len(instances) < exact_count:
+ changed = True
+ to_create = exact_count - len(instances)
+ if not checkmode:
+ (instance_dict_array, changed_instance_ids, changed) \
+ = create_instances(module, ec2, vpc, override_count=to_create)
+
+ for inst in instance_dict_array:
+ instances.append(inst)
+ elif len(instances) > exact_count:
+ changed = True
+ to_remove = len(instances) - exact_count
+ if not checkmode:
+ all_instance_ids = sorted([x.id for x in instances])
+ remove_ids = all_instance_ids[0:to_remove]
+
+ instances = [x for x in instances if x.id not in remove_ids]
+
+ (changed, instance_dict_array, changed_instance_ids) \
+ = terminate_instances(module, ec2, remove_ids)
+ terminated_list = []
+ for inst in instance_dict_array:
+ inst['state'] = "terminated"
+ terminated_list.append(inst)
+ instance_dict_array = terminated_list
+
+ # ensure all instances are dictionaries
+ all_instances = []
+ for inst in instances:
+
+ if not isinstance(inst, dict):
+ warn_if_public_ip_assignment_changed(module, inst)
+ inst = get_instance_info(inst)
+ all_instances.append(inst)
+
+ return (all_instances, instance_dict_array, changed_instance_ids, changed)
+
+
+def create_instances(module, ec2, vpc, override_count=None):
+ """
+ Creates new instances
+
+ module : AnsibleAWSModule object
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched
+ """
+
+ key_name = module.params.get('key_name')
+ id = module.params.get('id')
+ group_name = module.params.get('group')
+ group_id = module.params.get('group_id')
+ zone = module.params.get('zone')
+ instance_type = module.params.get('instance_type')
+ tenancy = module.params.get('tenancy')
+ spot_price = module.params.get('spot_price')
+ spot_type = module.params.get('spot_type')
+ image = module.params.get('image')
+ if override_count:
+ count = override_count
+ else:
+ count = module.params.get('count')
+ monitoring = module.params.get('monitoring')
+ kernel = module.params.get('kernel')
+ ramdisk = module.params.get('ramdisk')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ placement_group = module.params.get('placement_group')
+ user_data = module.params.get('user_data')
+ instance_tags = module.params.get('instance_tags')
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ private_ip = module.params.get('private_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ volumes = module.params.get('volumes')
+ ebs_optimized = module.params.get('ebs_optimized')
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ source_dest_check = module.boolean(module.params.get('source_dest_check'))
+ termination_protection = module.boolean(module.params.get('termination_protection'))
+ network_interfaces = module.params.get('network_interfaces')
+ spot_launch_group = module.params.get('spot_launch_group')
+ instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
+
+ vpc_id = None
+ if vpc_subnet_id:
+ if not vpc:
+ module.fail_json(msg="region must be specified")
+ else:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+ else:
+ vpc_id = None
+
+ try:
+ # Here we try to lookup the group id from the security group name - if group is set.
+ if group_name:
+ if vpc_id:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+ else:
+ grp_details = ec2.get_all_security_groups()
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
+ if len(unmatched) > 0:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ # Now we try to lookup the group id testing if group exists.
+ elif group_id:
+ # wrap the group_id in a list if it's not one already
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_name = [grp_item.name for grp_item in grp_details]
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json_aws(e, msg='Unable to authenticate to AWS')
+
+ # Look up any instances that match our run id.
+
+ running_instances = []
+ count_remaining = int(count)
+
+ if id is not None:
+ filter_dict = {'client-token': id, 'instance-state-name': 'running'}
+ previous_reservations = ec2.get_all_instances(None, filter_dict)
+ for res in previous_reservations:
+ for prev_instance in res.instances:
+ running_instances.append(prev_instance)
+ count_remaining = count_remaining - len(running_instances)
+
+ # Both min_count and max_count equal the count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
+
+ if count_remaining == 0:
+ changed = False
+ else:
+ changed = True
+ try:
+ params = {'image_id': image,
+ 'key_name': key_name,
+ 'monitoring_enabled': monitoring,
+ 'placement': zone,
+ 'instance_type': instance_type,
+ 'kernel_id': kernel,
+ 'ramdisk_id': ramdisk}
+ if user_data is not None:
+ params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')
+
+ if ebs_optimized:
+ params['ebs_optimized'] = ebs_optimized
+
+ # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
+ if not spot_price:
+ params['tenancy'] = tenancy
+
+ if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+ else:
+ if instance_profile_name is not None:
+ module.fail_json(
+ msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
+
+ if assign_public_ip is not None:
+ if not boto_supports_associate_public_ip_address(ec2):
+ module.fail_json(
+ msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
+ elif not vpc_subnet_id:
+ module.fail_json(
+ msg="assign_public_ip only available with vpc_subnet_id")
+
+ else:
+ if private_ip:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ private_ip_address=private_ip,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ else:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+ params['network_interfaces'] = interfaces
+ else:
+ if network_interfaces:
+ if isinstance(network_interfaces, string_types):
+ network_interfaces = [network_interfaces]
+ interfaces = []
+ for i, network_interface_id in enumerate(network_interfaces):
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ network_interface_id=network_interface_id,
+ device_index=i)
+ interfaces.append(interface)
+ params['network_interfaces'] = \
+ boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
+ else:
+ params['subnet_id'] = vpc_subnet_id
+ if vpc_subnet_id:
+ params['security_group_ids'] = group_id
+ else:
+ params['security_groups'] = group_name
+
+ if volumes:
+ bdm = BlockDeviceMapping()
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+ # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0
+ # to be a signal not to create this volume
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ bdm[volume['device_name']] = create_block_device(module, ec2, volume)
+
+ params['block_device_map'] = bdm
+
+ # check to see if we're using spot pricing first before starting instances
+ if not spot_price:
+ if assign_public_ip is not None and private_ip:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ )
+ )
+ else:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ private_ip_address=private_ip,
+ )
+ )
+
+ # For ordinary (not spot) instances, we can select 'stop'
+ # (the default) or 'terminate' here.
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
+
+ try:
+ res = ec2.run_instances(**params)
+ except boto.exception.EC2ResponseError as e:
+ if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
+ "InvalidParameterCombination" == e.error_code):
+ params['instance_initiated_shutdown_behavior'] = 'terminate'
+ res = ec2.run_instances(**params)
+ else:
+ raise
+
+ instids = [i.id for i in res.instances]
+ while True:
+ try:
+ ec2.get_all_instances(instids)
+ break
+ except boto.exception.EC2ResponseError as e:
+ if e.error_code == 'InvalidInstanceID.NotFound':
+ # there's a race between starting an instance and being able to describe it
+ continue
+ else:
+ module.fail_json_aws(e)
+
+ # The instances returned through ec2.run_instances above can be in
+ # terminated state due to idempotency. See commit 7f11c3d for a complete
+ # explanation.
+ terminated_instances = [
+ str(instance.id) for instance in res.instances if instance.state == 'terminated'
+ ]
+ if terminated_instances:
+ module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
+ "were created previously but have since been terminated - " +
+ "use a (possibly different) 'instanceid' parameter")
+
+ else:
+ if private_ip:
+ module.fail_json(
+ msg='private_ip only available with on-demand (non-spot) instances')
+ if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+ params['placement_group'] = placement_group
+ elif placement_group:
+ module.fail_json(
+ msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+ # Spot instances can't be told to 'stop'; they are always terminated.
+ # For convenience we accept an explicit 'terminate' and reject 'stop'.
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
+ module.fail_json(
+ msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
+
+ if spot_launch_group and isinstance(spot_launch_group, string_types):
+ params['launch_group'] = spot_launch_group
+
+ params.update(dict(
+ count=count_remaining,
+ type=spot_type,
+ ))
+
+ # Set spot ValidUntil
+ # ValidUntil -> (timestamp). The end date of the request, in
+ # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
+ utc_valid_until = (
+ datetime.datetime.utcnow()
+ + datetime.timedelta(seconds=spot_wait_timeout))
+ params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
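+ # For illustration only: with spot_wait_timeout=600 and a current UTC time of
+ # 2021-01-01 00:00:00, valid_until is rendered as '2021-01-01T00:10:00.000Z'.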
+
+ res = ec2.request_spot_instances(spot_price, **params)
+
+ # Now we have to do the intermediate waiting
+ if wait:
+ instids = await_spot_requests(module, ec2, res, count)
+ else:
+ instids = []
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e, msg='Instance creation failed')
+
+ # wait here until the instances are up
+ num_running = 0
+ wait_timeout = time.time() + wait_timeout
+ res_list = ()
+ while wait_timeout > time.time() and num_running < len(instids):
+ try:
+ res_list = ec2.get_all_instances(instids)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstanceID.NotFound':
+ time.sleep(1)
+ continue
+ else:
+ raise
+
+ num_running = 0
+ for res in res_list:
+ num_running += len([i for i in res.instances if i.state == 'running'])
+ if len(res_list) <= 0:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+ if wait and num_running < len(instids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ # We do this after the loop ends so that we end up with one list
+ for res in res_list:
+ running_instances.extend(res.instances)
+
+ # Enabled by default by AWS
+ if source_dest_check is False:
+ for inst in running_instances:
+ inst.modify_attribute('sourceDestCheck', False)
+
+ # Disabled by default by AWS
+ if termination_protection is True:
+ for inst in running_instances:
+ inst.modify_attribute('disableApiTermination', True)
+
+ # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
+ if instance_tags and instids:
+ try:
+ ec2.create_tags(instids, instance_tags)
+ except boto.exception.EC2ResponseError as e:
+ module.fail_json_aws(e, msg='Instance tagging failed')
+
+ instance_dict_array = []
+ created_instance_ids = []
+ for inst in running_instances:
+ inst.update()
+ d = get_instance_info(inst)
+ created_instance_ids.append(inst.id)
+ instance_dict_array.append(d)
+
+ return (instance_dict_array, created_instance_ids, changed)
+
+
+def terminate_instances(module, ec2, instance_ids):
+ """
+ Terminates a list of instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: a list of instance IDs to terminate
+
+ Returns a dictionary of instance information
+ about the instances terminated.
+
+ If none of the instances are in a running or
+ stopped state, "changed" will be set to False.
+
+ """
+
+ # Whether to wait for termination to complete before returning
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ terminated_instance_ids = []
+ for res in ec2.get_all_instances(instance_ids):
+ for inst in res.instances:
+ if inst.state == 'running' or inst.state == 'stopped':
+ terminated_instance_ids.append(inst.id)
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ ec2.terminate_instances([inst.id])
+ except EC2ResponseError as e:
+ module.fail_json_aws(e, msg='Unable to terminate instance {0}'.format(inst.id))
+ changed = True
+
+ # wait here until the instances are 'terminated'
+ if wait:
+ num_terminated = 0
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+ response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+ filters={'instance-state-name': 'terminated'})
+ try:
+ num_terminated = sum([len(res.instances) for res in response])
+ except Exception as e:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+
+ if num_terminated < len(terminated_instance_ids):
+ time.sleep(5)
+
+ # waiting took too long
+ if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+ module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+ # Let's get the current state of the instances after terminating - issue600
+ instance_dict_array = []
+ for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+ for inst in res.instances:
+ instance_dict_array.append(get_instance_info(inst))
+
+ return (changed, instance_dict_array, terminated_instance_ids)
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Starts or stops a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of IDs of the instances to start or stop
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("running" or "stopped")
+
+ Returns a dictionary of instance information
+ about the instances started/stopped.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two
+ """
+
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ group_id = module.params.get('group_id')
+ group_name = module.params.get('group')
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+
+ filters['instance-state-name'] = ["pending", "running", "stopping", "stopped"]
+
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+ # Check that our instances are not already in the desired state
+
+ # Check (and possibly change) instance attributes and instance state
+ existing_instances_array = []
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check security groups if we're using EC2-VPC; EC2-Classic security groups cannot be modified
+ if inst.vpc_id and group_name:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
+ if unmatched:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
+ elif inst.vpc_id and group_id:
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_ids = [grp_item.id for grp_item in grp_details]
+ if inst.vpc_id and (group_name or group_id):
+ if set(sg.id for sg in inst.groups) != set(group_ids):
+ changed = inst.modify_attribute('groupSet', group_ids)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ if state == 'running':
+ inst.start()
+ else:
+ inst.stop()
+ except EC2ResponseError as e:
+ module.fail_json_aws(e, 'Unable to change state for instance {0}'.format(inst.id))
+ changed = True
+ existing_instances_array.append(inst.id)
+
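+ # Merge the IDs discovered via tag filtering with any explicitly passed IDs,
+ # deduplicating through set() so each instance is only polled once below.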
+ instance_ids = list(set(existing_instances_array + (instance_ids or [])))
+ # Wait for all the instances to finish starting or stopping
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ instance_dict_array = []
+ matched_instances = []
+ for res in ec2.get_all_instances(instance_ids):
+ for i in res.instances:
+ if i.state == state:
+ instance_dict_array.append(get_instance_info(i))
+ matched_instances.append(i)
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def restart_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Restarts a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of IDs of the instances to restart
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("restarted")
+
+ Returns a dictionary of instance information
+ about the instances.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Wait does not apply here, as this is an OS-level operation.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two.
+ """
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ # Check that our instances are not already in the desired state
+
+ # Check (and possibly change) instance attributes and instance state
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ inst.reboot()
+ except EC2ResponseError as e:
+ module.fail_json_aws(e, msg='Unable to change state for instance {0}'.format(inst.id))
+ changed = True
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+ """
+ Check the instance disableApiTermination attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+ returns: True if the state changed, None otherwise
+ """
+
+ termination_protection = module.params.get('termination_protection')
+
+ if (termination_protection is not None and inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection):
+ inst.modify_attribute('disableApiTermination', termination_protection)
+ return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+ """
+ Check the instance sourceDestCheck attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+ returns: True if the state changed, None otherwise
+ """
+
+ source_dest_check = module.params.get('source_dest_check')
+
+ if source_dest_check is not None:
+ try:
+ if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+ inst.modify_attribute('sourceDestCheck', source_dest_check)
+ return True
+ except boto.exception.EC2ResponseError as exc:
+ # instances with more than one Elastic Network Interface will
+ # fail, because they have the sourceDestCheck attribute defined
+ # per-interface
+ if exc.code == 'InvalidInstanceID':
+ for interface in inst.interfaces:
+ if interface.source_dest_check != source_dest_check:
+ ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+ return True
+ else:
+ module.fail_json_aws(exc, msg='Failed to handle source_dest_check state for instance {0}'.format(inst.id))
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = module.params.get('assign_public_ip')
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = getattr(instance, 'public_dns_name', None)
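+ # The check below warns in two mismatch cases: a public IP was requested but
+ # the instance has no public DNS name, or assign_public_ip is explicitly
+ # False while the instance does have one.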
+ if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
+ module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
+
+
+def main():
+ argument_spec = dict(
+ key_name=dict(aliases=['keypair']),
+ id=dict(),
+ group=dict(type='list', elements='str', aliases=['groups']),
+ group_id=dict(type='list', elements='str'),
+ zone=dict(aliases=['aws_zone', 'ec2_zone']),
+ instance_type=dict(aliases=['type']),
+ spot_price=dict(),
+ spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+ spot_launch_group=dict(),
+ image=dict(),
+ kernel=dict(),
+ count=dict(type='int', default='1'),
+ monitoring=dict(type='bool', default=False),
+ ramdisk=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ spot_wait_timeout=dict(type='int', default=600),
+ placement_group=dict(),
+ user_data=dict(),
+ instance_tags=dict(type='dict'),
+ vpc_subnet_id=dict(),
+ assign_public_ip=dict(type='bool'),
+ private_ip=dict(),
+ instance_profile_name=dict(),
+ instance_ids=dict(type='list', elements='str', aliases=['instance_id']),
+ source_dest_check=dict(type='bool', default=None),
+ termination_protection=dict(type='bool', default=None),
+ state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
+ instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
+ exact_count=dict(type='int', default=None),
+ count_tag=dict(type='raw'),
+ volumes=dict(type='list', elements='dict',),
+ ebs_optimized=dict(type='bool', default=False),
+ tenancy=dict(default='default', choices=['default', 'dedicated']),
+ network_interfaces=dict(type='list', elements='str', aliases=['network_interface'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ check_boto3=False,
+ mutually_exclusive=[
+ # Can be uncommented when we finish the deprecation cycle.
+ # ['group', 'group_id'],
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['exact_count', 'instance_ids'],
+ ['network_interfaces', 'assign_public_ip'],
+ ['network_interfaces', 'group'],
+ ['network_interfaces', 'group_id'],
+ ['network_interfaces', 'private_ip'],
+ ['network_interfaces', 'vpc_subnet_id'],
+ ],
+ )
+
+ if module.params.get('group') and module.params.get('group_id'):
+ module.deprecate(
+ msg='Support for passing both group and group_id has been deprecated. '
+ 'Currently group_id is ignored; in the future, passing both will result in an error',
+ date='2022-06-01', collection_name='amazon.aws')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ if module.params.get('region') or not module.params.get('ec2_url'):
+ ec2 = ec2_connect(module)
+ elif module.params.get('ec2_url'):
+ ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+ if 'region' not in aws_connect_kwargs:
+ aws_connect_kwargs['region'] = ec2.region
+
+ vpc = connect_vpc(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json_aws(e, msg='Failed to get connection')
+
+ tagged_instances = []
+
+ state = module.params['state']
+
+ if state == 'absent':
+ instance_ids = module.params['instance_ids']
+ if not instance_ids:
+ module.fail_json(msg='instance_ids list is required for absent state')
+
+ (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+ elif state in ('running', 'stopped'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='instance_ids must be a list of instance IDs, or instance_tags a dict of tags: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'restarted':
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='instance_ids must be a list of instance IDs, or instance_tags a dict of tags: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+
+ if module.params.get('exact_count') is None:
+ (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+ else:
+ (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+ # Always return instances in the same order
+ if new_instance_ids:
+ new_instance_ids.sort()
+ if instance_dict_array:
+ instance_dict_array.sort(key=lambda x: x['id'])
+ if tagged_instances:
+ tagged_instances.sort(key=lambda x: x['id'])
+
+ module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
new file mode 100644
index 00000000..86364f78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
@@ -0,0 +1,761 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami
+version_added: 1.0.0
+short_description: Create or destroy an image (AMI) in ec2
+description:
+ - Registers or deregisters ec2 images.
+options:
+ instance_id:
+ description:
+ - Instance ID to create the AMI from.
+ type: str
+ name:
+ description:
+ - The name of the new AMI.
+ type: str
+ architecture:
+ description:
+ - The target architecture of the image to register.
+ default: "x86_64"
+ type: str
+ kernel_id:
+ description:
+ - The target kernel id of the image to register.
+ type: str
+ virtualization_type:
+ description:
+ - The virtualization type of the image to register.
+ default: "hvm"
+ type: str
+ root_device_name:
+ description:
+ - The root device name of the image to register.
+ type: str
+ wait:
+ description:
+ - Wait for the AMI to be in state 'available' before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 1200
+ type: int
+ state:
+ description:
+ - Register or deregister an AMI.
+ default: 'present'
+ choices: [ "absent", "present" ]
+ type: str
+ description:
+ description:
+ - Human-readable string describing the contents and purpose of the AMI.
+ type: str
+ no_reboot:
+ description:
+ - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the
+ responsibility of maintaining file system integrity is left to the owner of the instance.
+ default: false
+ type: bool
+ image_id:
+ description:
+ - Image ID to be deregistered.
+ type: str
+ device_mapping:
+ description:
+ - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters).
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ description:
+ - The device name. For example C(/dev/sda).
+ required: yes
+ aliases: ['DeviceName']
+ virtual_name:
+ type: str
+ description:
+ - The virtual name for the device.
+ - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ - Alias C(VirtualName) has been deprecated and will be removed after 2022-06-01.
+ aliases: ['VirtualName']
+ no_device:
+ type: bool
+ description:
+ - Suppresses the specified device included in the block device mapping of the AMI.
+ - Alias C(NoDevice) has been deprecated and will be removed after 2022-06-01.
+ aliases: ['NoDevice']
+ volume_type:
+ type: str
+ description: The volume type. Defaults to C(gp2) when not set.
+ delete_on_termination:
+ type: bool
+ description: Whether the device should be automatically deleted when the Instance is terminated.
+ snapshot_id:
+ type: str
+ description: The ID of the Snapshot.
+ iops:
+ type: int
+ description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume
+ encrypted:
+ type: bool
+ description: Whether the volume should be encrypted.
+ volume_size:
+ aliases: ['size']
+ type: int
+ description: The size of the volume (in GiB)
+ delete_snapshot:
+ description:
+ - Delete snapshots when deregistering the AMI.
+ default: false
+ type: bool
+ tags:
+ description:
+ - A dictionary of tags to add to the new image, for example '{"key1":"value1","key2":"value2"}'.
+ type: dict
+ purge_tags:
+ description: Whether to remove existing tags that aren't passed in the C(tags) parameter
+ default: false
+ type: bool
+ launch_permissions:
+ description:
+ - Users and groups that should be able to launch the AMI. Expects a dictionary with a key of user_ids and/or group_names. user_ids should
+ be a list of account IDs. group_names should be a list of groups; "all" is currently the only acceptable value.
+ - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users)
+ type: dict
+ image_location:
+ description:
+ - The s3 location of an image to use for the AMI.
+ type: str
+ enhanced_networking:
+ description:
+ - A boolean representing whether enhanced networking with ENA is enabled or not.
+ type: bool
+ billing_products:
+ description:
+ - A list of valid billing codes. To be used with valid accounts by aws marketplace vendors.
+ type: list
+ elements: str
+ ramdisk_id:
+ description:
+ - The ID of the RAM disk.
+ type: str
+ sriov_net_support:
+ description:
+ - Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.
+ type: str
+author:
+ - "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
+ - "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
+ - "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
+ - "Willem van Ketwich (@wilvk) <willvk@gmail.com>"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+# Thank you to iAcquire for sponsoring development of this module.
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Basic AMI Creation
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ wait: yes
+ name: newtest
+ tags:
+ Name: newtest
+ Service: TestService
+
+- name: Basic AMI Creation, without waiting
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ wait: no
+ name: newtest
+
+- name: AMI Registration from EBS Snapshot
+ amazon.aws.ec2_ami:
+ name: newtest
+ state: present
+ architecture: x86_64
+ virtualization_type: hvm
+ root_device_name: /dev/xvda
+ device_mapping:
+ - device_name: /dev/xvda
+ volume_size: 8
+ snapshot_id: snap-xxxxxxxx
+ delete_on_termination: true
+ volume_type: gp2
+
+- name: AMI Creation, with a custom root-device size and another EBS attached
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ name: newtest
+ device_mapping:
+ - device_name: /dev/sda1
+ size: XXX
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdb
+ size: YYY
+ delete_on_termination: false
+ volume_type: gp2
+
+- name: AMI Creation, excluding a volume attached at /dev/sdb
+ amazon.aws.ec2_ami:
+ instance_id: i-xxxxxx
+ name: newtest
+ device_mapping:
+ - device_name: /dev/sda1
+ size: XXX
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdb
+ no_device: yes
+
+- name: Deregister/Delete AMI (keep associated snapshots)
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ delete_snapshot: False
+ state: absent
+
+- name: Deregister AMI (delete associated snapshots too)
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ delete_snapshot: True
+ state: absent
+
+- name: Update AMI Launch Permissions, making it public
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ state: present
+ launch_permissions:
+ group_names: ['all']
+
+- name: Allow AMI to be launched by another account
+ amazon.aws.ec2_ami:
+ image_id: "{{ instance.image_id }}"
+ state: present
+ launch_permissions:
+ user_ids: ['123456789012']
+'''
+
+RETURN = '''
+architecture:
+ description: Architecture of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "x86_64"
+block_device_mapping:
+ description: Block device mapping associated with image.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ "/dev/sda1": {
+ "delete_on_termination": true,
+ "encrypted": false,
+ "size": 10,
+ "snapshot_id": "snap-1a03b80e7",
+ "volume_type": "standard"
+ }
+ }
+creationDate:
+ description: Creation date of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "2015-10-15T22:43:44.000Z"
+description:
+ description: Description of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "nat-server"
+hypervisor:
+ description: Type of hypervisor.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "xen"
+image_id:
+ description: ID of the image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "ami-1234abcd"
+is_public:
+ description: Whether image is public.
+ returned: when AMI is created or already exists
+ type: bool
+ sample: false
+launch_permission:
+ description: Permissions allowing other accounts to access the AMI.
+ returned: when AMI is created or already exists
+ type: list
+ sample:
+ - group: "all"
+location:
+ description: Location of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "315210894379/nat-server"
+name:
+ description: AMI name of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "nat-server"
+ownerId:
+ description: Owner of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "435210894375"
+platform:
+ description: Platform of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: null
+root_device_name:
+ description: Root device name of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "/dev/sda1"
+root_device_type:
+ description: Root device type of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "ebs"
+state:
+ description: State of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "available"
+tags:
+ description: A dictionary of tags assigned to image.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ "Env": "devel",
+ "Name": "nat-server"
+ }
+virtualization_type:
+ description: Image virtualization type.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "hvm"
+snapshots_deleted:
+ description: A list of snapshot ids deleted after deregistering image.
+ returned: after AMI is deregistered, if I(delete_snapshot=true)
+ type: list
+ sample: [
+ "snap-fbcccb8f",
+ "snap-cfe7cdb4"
+ ]
+'''
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.waiters import get_waiter
+
+
+def get_block_device_mapping(image):
+ bdm_dict = dict()
+ if image is not None and image.get('block_device_mappings') is not None:
+ bdm = image.get('block_device_mappings')
+ for device in bdm:
+ device_name = device.get('device_name')
+ if 'ebs' in device:
+ ebs = device.get("ebs")
+ bdm_dict_item = {
+ 'size': ebs.get("volume_size"),
+ 'snapshot_id': ebs.get("snapshot_id"),
+ 'volume_type': ebs.get("volume_type"),
+ 'encrypted': ebs.get("encrypted"),
+ 'delete_on_termination': ebs.get("delete_on_termination")
+ }
+ elif 'virtual_name' in device:
+ bdm_dict_item = dict(virtual_name=device['virtual_name'])
+ bdm_dict[device_name] = bdm_dict_item
+ return bdm_dict
+
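+ # Illustrative only: for an image with a single EBS mapping, the helper above
+ # returns a dict shaped like
+ # {'/dev/sda1': {'size': 8, 'snapshot_id': 'snap-0123456789abcdef0',
+ # 'volume_type': 'gp2', 'encrypted': False, 'delete_on_termination': True}}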
+
+def get_ami_info(camel_image):
+ image = camel_dict_to_snake_dict(camel_image)
+ return dict(
+ image_id=image.get("image_id"),
+ state=image.get("state"),
+ architecture=image.get("architecture"),
+ block_device_mapping=get_block_device_mapping(image),
+ creationDate=image.get("creation_date"),
+ description=image.get("description"),
+ hypervisor=image.get("hypervisor"),
+ is_public=image.get("public"),
+ location=image.get("image_location"),
+ ownerId=image.get("owner_id"),
+ root_device_name=image.get("root_device_name"),
+ root_device_type=image.get("root_device_type"),
+ virtualization_type=image.get("virtualization_type"),
+ name=image.get("name"),
+ tags=boto3_tag_list_to_ansible_dict(image.get('tags')),
+ platform=image.get("platform"),
+ enhanced_networking=image.get("ena_support"),
+ image_owner_alias=image.get("image_owner_alias"),
+ image_type=image.get("image_type"),
+ kernel_id=image.get("kernel_id"),
+ product_codes=image.get("product_codes"),
+ ramdisk_id=image.get("ramdisk_id"),
+ sriov_net_support=image.get("sriov_net_support"),
+ state_reason=image.get("state_reason"),
+ launch_permissions=image.get('launch_permissions')
+ )
+
+
+def create_image(module, connection):
+ instance_id = module.params.get('instance_id')
+ name = module.params.get('name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ description = module.params.get('description')
+ architecture = module.params.get('architecture')
+ kernel_id = module.params.get('kernel_id')
+ root_device_name = module.params.get('root_device_name')
+ virtualization_type = module.params.get('virtualization_type')
+ no_reboot = module.params.get('no_reboot')
+ device_mapping = module.params.get('device_mapping')
+ tags = module.params.get('tags')
+ launch_permissions = module.params.get('launch_permissions')
+ image_location = module.params.get('image_location')
+ enhanced_networking = module.params.get('enhanced_networking')
+ billing_products = module.params.get('billing_products')
+ ramdisk_id = module.params.get('ramdisk_id')
+ sriov_net_support = module.params.get('sriov_net_support')
+
+ try:
+ params = {
+ 'Name': name,
+ 'Description': description
+ }
+
+ block_device_mapping = None
+
+ # Remove empty values injected by using options
+ if device_mapping:
+ block_device_mapping = []
+ for device in device_mapping:
+ device = dict((k, v) for k, v in device.items() if v is not None)
+ device['Ebs'] = {}
+ device = rename_item_if_exists(device, 'device_name', 'DeviceName')
+ device = rename_item_if_exists(device, 'virtual_name', 'VirtualName')
+ device = rename_item_if_exists(device, 'no_device', 'NoDevice')
+ device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs')
+ device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs')
+ device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs')
+ device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int)
+ device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int)
+ device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs')
+ device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs')
+ block_device_mapping.append(device)
+ if block_device_mapping:
+ params['BlockDeviceMappings'] = block_device_mapping
+ if instance_id:
+ params['InstanceId'] = instance_id
+ params['NoReboot'] = no_reboot
+ image_id = connection.create_image(aws_retry=True, **params).get('ImageId')
+ else:
+ if architecture:
+ params['Architecture'] = architecture
+ if virtualization_type:
+ params['VirtualizationType'] = virtualization_type
+ if image_location:
+ params['ImageLocation'] = image_location
+ if enhanced_networking:
+ params['EnaSupport'] = enhanced_networking
+ if billing_products:
+ params['BillingProducts'] = billing_products
+ if ramdisk_id:
+ params['RamdiskId'] = ramdisk_id
+ if sriov_net_support:
+ params['SriovNetSupport'] = sriov_net_support
+ if kernel_id:
+ params['KernelId'] = kernel_id
+ if root_device_name:
+ params['RootDeviceName'] = root_device_name
+ image_id = connection.register_image(aws_retry=True, **params).get('ImageId')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error registering image")
+
+ if wait:
+ delay = 15
+ max_attempts = wait_timeout // delay
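+ # e.g. the default wait_timeout of 1200 seconds at a 15 second delay yields 80 polling attempts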
+ waiter = get_waiter(connection, 'image_available')
+ waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts))
+
+ if tags:
+ try:
+ connection.create_tags(aws_retry=True, Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error tagging image")
+
+ if launch_permissions:
+ try:
+ params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list()))
+ for group_name in launch_permissions.get('group_names', []):
+ params['LaunchPermission']['Add'].append(dict(Group=group_name))
+ for user_id in launch_permissions.get('user_ids', []):
+ params['LaunchPermission']['Add'].append(dict(UserId=str(user_id)))
+ if params['LaunchPermission']['Add']:
+ connection.modify_image_attribute(aws_retry=True, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id)
+
+ module.exit_json(msg="AMI creation operation complete.", changed=True,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+
+
+def deregister_image(module, connection):
+ image_id = module.params.get('image_id')
+ delete_snapshot = module.params.get('delete_snapshot')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ image = get_image_by_id(module, connection, image_id)
+
+ if image is None:
+ module.exit_json(changed=False)
+
+ # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable.
+ snapshots = []
+ if 'BlockDeviceMappings' in image:
+ for mapping in image.get('BlockDeviceMappings'):
+ snapshot_id = mapping.get('Ebs', {}).get('SnapshotId')
+ if snapshot_id is not None:
+ snapshots.append(snapshot_id)
+
+ # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes.
+ if 'ImageId' in image:
+ try:
+ connection.deregister_image(aws_retry=True, ImageId=image_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error deregistering image")
+ else:
+ module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False)
+
+ image = get_image_by_id(module, connection, image_id)
+ wait_timeout = time.time() + wait_timeout
+
+ while wait and wait_timeout > time.time() and image is not None:
+ image = get_image_by_id(module, connection, image_id)
+ time.sleep(3)
+
+ if wait and wait_timeout <= time.time():
+ module.fail_json(msg="Timed out waiting for image to be deregistered.")
+
+ exit_params = {'msg': "AMI deregister operation complete.", 'changed': True}
+
+ if delete_snapshot:
+ for snapshot_id in snapshots:
+ try:
+ connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id)
+ # Don't error out if root volume snapshot was already deregistered as part of deregister_image
+ except is_boto3_error_code('InvalidSnapshot.NotFound'):
+ pass
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to delete snapshot.')
+ exit_params['snapshots_deleted'] = snapshots
+
+ module.exit_json(**exit_params)
+
+
+def update_image(module, connection, image_id):
+ launch_permissions = module.params.get('launch_permissions')
+ image = get_image_by_id(module, connection, image_id)
+ if image is None:
+ module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
+ changed = False
+
+ if launch_permissions is not None:
+ current_permissions = image['LaunchPermissions']
+
+ current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission)
+ desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', []))
+ current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission)
+ desired_groups = set(launch_permissions.get('group_names', []))
+
+ to_add_users = desired_users - current_users
+ to_remove_users = current_users - desired_users
+ to_add_groups = desired_groups - current_groups
+ to_remove_groups = current_groups - desired_groups
+
+ to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users]
+ to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users]
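+ # Illustrative only: with current_users={'111111111111'} and desired_users=
+ # {'222222222222'} (no group changes), to_add is [{'UserId': '222222222222'}]
+ # and to_remove is [{'UserId': '111111111111'}].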
+
+ if to_add or to_remove:
+ try:
+ connection.modify_image_attribute(aws_retry=True,
+ ImageId=image_id, Attribute='launchPermission',
+ LaunchPermission=dict(Add=to_add, Remove=to_remove))
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id)
+
+ desired_tags = module.params.get('tags')
+ if desired_tags is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(image.get('Tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags=module.params.get('purge_tags'))
+
+ if tags_to_remove:
+ try:
+ connection.delete_tags(aws_retry=True, Resources=[image_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_remove])
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error updating tags")
+
+ if tags_to_add:
+ try:
+ connection.create_tags(aws_retry=True, Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error updating tags")
+
+ description = module.params.get('description')
+ if description and description != image['Description']:
+ try:
+ connection.modify_image_attribute(aws_retry=True, Attribute='Description', ImageId=image_id, Description=dict(Value=description))
+ changed = True
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error setting description for image %s" % image_id)
+
+ if changed:
+ module.exit_json(msg="AMI updated.", changed=True,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+ else:
+ module.exit_json(msg="AMI not updated.", changed=False,
+ **get_ami_info(get_image_by_id(module, connection, image_id)))
+
+
+def get_image_by_id(module, connection, image_id):
+ try:
+ try:
+ images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id])
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error retrieving image %s" % image_id)
+ images = images_response.get('Images')
+ no_images = len(images)
+ if no_images == 0:
+ return None
+ if no_images == 1:
+ result = images[0]
+ try:
+ result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+ ImageId=image_id)['LaunchPermissions']
+ result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes',
+ ImageId=image_id)['ProductCodes']
+ except is_boto3_error_code('InvalidAMIID.Unavailable'):
+ pass
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
+ return result
+ module.fail_json(msg="Invalid number of instances (%s) found for image_id: %s." % (str(len(images)), image_id))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Error retrieving image by image_id")
+
+
+def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None):
+ new_item = dict_object.get(attribute)
+ if new_item is not None:
+ if attribute_type is not None:
+ new_item = attribute_type(new_item)
+ if child_node is None:
+ dict_object[new_attribute] = new_item
+ else:
+ dict_object[child_node][new_attribute] = new_item
+ dict_object.pop(attribute)
+ return dict_object
+
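+ # Illustrative only: rename_item_if_exists({'volume_size': '10', 'Ebs': {}},
+ # 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int) returns
+ # {'Ebs': {'VolumeSize': 10}}, coercing the value and dropping the old key.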
+
+def main():
+ mapping_options = dict(
+ device_name=dict(type='str', aliases=['DeviceName'], required=True),
+ virtual_name=dict(
+ type='str', aliases=['VirtualName'],
+ deprecated_aliases=[dict(name='VirtualName', date='2022-06-01', collection_name='amazon.aws')]),
+ no_device=dict(
+ type='bool', aliases=['NoDevice'],
+ deprecated_aliases=[dict(name='NoDevice', date='2022-06-01', collection_name='amazon.aws')]),
+ volume_type=dict(type='str'),
+ delete_on_termination=dict(type='bool'),
+ snapshot_id=dict(type='str'),
+ iops=dict(type='int'),
+ encrypted=dict(type='bool'),
+ volume_size=dict(type='int', aliases=['size']),
+ )
+ argument_spec = dict(
+ instance_id=dict(),
+ image_id=dict(),
+ architecture=dict(default='x86_64'),
+ kernel_id=dict(),
+ virtualization_type=dict(default='hvm'),
+ root_device_name=dict(),
+ delete_snapshot=dict(default=False, type='bool'),
+ name=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(default=1200, type='int'),
+ description=dict(default=''),
+ no_reboot=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent']),
+ device_mapping=dict(type='list', elements='dict', options=mapping_options),
+ tags=dict(type='dict'),
+ launch_permissions=dict(type='dict'),
+ image_location=dict(),
+ enhanced_networking=dict(type='bool'),
+ billing_products=dict(type='list', elements='str',),
+ ramdisk_id=dict(),
+ sriov_net_support=dict(),
+ purge_tags=dict(type='bool', default=False)
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'absent', ['image_id']],
+ ]
+ )
+
+ # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
+ # the required_if for state=absent, so check manually instead
+ if not any([module.params['image_id'], module.params['name']]):
+ module.fail_json(msg="one of the following is required: name, image_id")
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ if module.params.get('state') == 'absent':
+ deregister_image(module, connection)
+ elif module.params.get('state') == 'present':
+ if module.params.get('image_id'):
+ update_image(module, connection, module.params.get('image_id'))
+ if not module.params.get('instance_id') and not module.params.get('device_mapping'):
+ module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
+ create_image(module, connection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py
new file mode 100644
index 00000000..f2b52556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_facts.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: 1.0.0
+short_description: Gather information about ec2 AMIs
+description:
+ - Gather information about ec2 AMIs.
+ - This module was called C(amazon.aws.ec2_ami_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Prasad Katti (@prasadkatti)
+requirements: [ boto3 ]
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+ - Filter the images by the owner. Valid options are an AWS account ID, self,
+ or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ amazon.aws.ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ amazon.aws.ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ amazon.aws.ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ amazon.aws.ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: EBS volumes
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 408466080000/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+ description: A list of AWS accounts that may launch the AMI.
+ returned: When the image is owned by the calling account and I(describe_image_attributes) is yes.
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "408466080000"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '408466080000'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+ # 'self' is not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
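+ # Illustrative only: owners=['099720109477', 'amazon'] leaves owner_param empty
+ # and yields filters={'owner-id': ['099720109477'], 'owner-alias': ['amazon']}
+ # before the boto3 filter-list conversion below.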
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param,
+ ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+ ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except is_boto3_error_code('AuthFailure'):
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, 'Failed to describe AMI')
+
+ images.sort(key=lambda e: e.get('creation_date', '')) # creation_date may be missing for some images
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', elements='str', aliases=['owner']),
+ executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._module._name == 'ec2_ami_facts':
+ module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
new file mode 100644
index 00000000..f2b52556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: 1.0.0
+short_description: Gather information about EC2 AMIs
+description:
+ - Gather information about EC2 AMIs.
+ - This module was called C(amazon.aws.ec2_ami_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Prasad Katti (@prasadkatti)
+requirements: [ boto3 ]
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+ - Filter the images by the owner. Valid options are an AWS account ID, C(self),
+ or an AWS owner alias (C(amazon), C(aws-marketplace), or C(microsoft)).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ amazon.aws.ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ amazon.aws.ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ amazon.aws.ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ amazon.aws.ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: EBS volumes
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 408466080000/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+ description: A list of AWS accounts that may launch the AMI.
+ returned: When the image is owned by the calling account and I(describe_image_attributes) is yes.
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "408466080000"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '408466080000'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
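+ # For example (illustrative values only): owners=['self', '099720109477', 'amazon']
+ # becomes Owners=['self'] plus the filters
+ # {'owner-id': ['099720109477'], 'owner-alias': ['amazon']}.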
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+ # 'self' is not a valid owner-alias filter value (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param,
+ ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission',
+ ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except is_boto3_error_code('AuthFailure'):
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, 'Failed to describe AMI')
+
+ images.sort(key=lambda e: e.get('creation_date', '')) # creation_date may not always be present
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', elements='str', aliases=['owner']),
+ executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._module._name == 'ec2_ami_facts':
+ module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py
new file mode 100644
index 00000000..a1e732e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py
@@ -0,0 +1,1338 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_elb_lb
+version_added: 1.0.0
+description:
+ - Returns information about the load balancer.
+ - Will be marked changed only if the state of the load balancer changes.
+short_description: Creates, updates or destroys an Amazon ELB.
+author:
+ - "Jim Dalton (@jsdalton)"
+options:
+ state:
+ description:
+ - Create or destroy the ELB.
+ type: str
+ choices: [ absent, present ]
+ required: true
+ name:
+ description:
+ - The name of the ELB.
+ type: str
+ required: true
+ listeners:
+ description:
+ - List of ports/protocols for this ELB to listen on (see examples).
+ type: list
+ elements: dict
+ purge_listeners:
+ description:
+ - Purge existing listeners on the ELB that are not found in I(listeners).
+ type: bool
+ default: yes
+ instance_ids:
+ description:
+ - List of instance ids to attach to this ELB.
+ type: list
+ elements: str
+ purge_instance_ids:
+ description:
+ - Purge existing instance IDs on the ELB that are not found in I(instance_ids).
+ type: bool
+ default: no
+ zones:
+ description:
+ - List of availability zones to enable on this ELB.
+ type: list
+ elements: str
+ purge_zones:
+ description:
+ - Purge existing availability zones on the ELB that are not found in I(zones).
+ type: bool
+ default: no
+ security_group_ids:
+ description:
+ - A list of security groups to apply to the ELB.
+ type: list
+ elements: str
+ security_group_names:
+ description:
+ - A list of security group names to apply to the ELB.
+ type: list
+ elements: str
+ health_check:
+ description:
+ - An associative array of health check configuration settings (see examples).
+ type: dict
+ access_logs:
+ description:
+ - An associative array of access logs configuration settings (see examples).
+ type: dict
+ subnets:
+ description:
+ - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
+ type: list
+ elements: str
+ purge_subnets:
+ description:
+ - Purge existing subnets on the ELB that are not found in I(subnets).
+ type: bool
+ default: no
+ scheme:
+ description:
+ - The scheme to use when creating the ELB. For a private VPC-visible ELB use C(internal).
+ - If you choose to update the scheme with a different value, the ELB will be destroyed and
+ recreated. To update the scheme you must use the I(wait) option.
+ type: str
+ choices: ["internal", "internet-facing"]
+ default: 'internet-facing'
+ validate_certs:
+ description:
+ - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0.
+ type: bool
+ default: yes
+ connection_draining_timeout:
+ description:
+ - Wait a specified timeout allowing connections to drain before terminating an instance.
+ type: int
+ idle_timeout:
+ description:
+ - ELB connections from clients and to servers are timed out after this amount of time.
+ type: int
+ cross_az_load_balancing:
+ description:
+ - Distribute load across all configured Availability Zones.
+ - Defaults to C(false).
+ type: bool
+ stickiness:
+ description:
+ - An associative array of stickiness policy settings. Policy will be applied to all listeners (see examples).
+ type: dict
+ wait:
+ description:
+ - When specified, Ansible will check the status of the load balancer to ensure it has been successfully
+ removed from AWS.
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated.
+ - A maximum of 600 seconds (10 minutes) is allowed.
+ type: int
+ default: 60
+ tags:
+ description:
+ - An associative array of tags. To delete all tags, supply an empty dict (C({})).
+ type: dict
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+# Basic provisioning example (non-VPC)
+
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+ proxy_protocol: True
+ - protocol: https
+ load_balancer_port: 443
+ instance_protocol: http # optional, defaults to value of protocol setting
+ instance_port: 80
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
+
+# Internal ELB example
+
+- amazon.aws.ec2_elb_lb:
+ name: "test-vpc"
+ scheme: internal
+ state: present
+ instance_ids:
+ - i-abcd1234
+ purge_instance_ids: true
+ subnets:
+ - subnet-abcd1234
+ - subnet-1a2b3c4d
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+
+# Configure a health check and the access logs
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: http # options are http, https, ssl, tcp
+ ping_port: 80
+ ping_path: "/index.html" # not required for tcp or ssl
+ response_timeout: 5 # seconds
+ interval: 30 # seconds
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ access_logs:
+ interval: 5 # minutes (defaults to 60)
+ s3_location: "my-bucket" # This value is required if access_logs is set
+ s3_prefix: "logs"
+
+# Ensure ELB is gone
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: absent
+
+# Ensure ELB is gone and wait for check (for default timeout)
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: yes
+
+# Ensure ELB is gone and wait for check with timeout value
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: absent
+ wait: yes
+ wait_timeout: 600
+
+# Normally, this module will purge any listeners that exist on the ELB
+# but aren't specified in the listeners parameter. If purge_listeners is
+# false, they are left alone.
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_listeners: no
+
+# Normally, this module will leave availability zones that are enabled
+# on the ELB alone. If purge_zones is true, then any extraneous zones
+# will be removed
+- amazon.aws.ec2_elb_lb:
+ name: "test-please-delete"
+ state: present
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+
+# Create an ELB and assign a list of subnets to it.
+- amazon.aws.ec2_elb_lb:
+ state: present
+ name: 'New ELB'
+ security_group_ids: 'sg-123456, sg-67890'
+ region: us-west-2
+ subnets: 'subnet-123456,subnet-67890'
+ purge_subnets: yes
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+
+# Create an ELB with connection draining, increased idle timeout and cross availability
+# zone load balancing
+- amazon.aws.ec2_elb_lb:
+ name: "New ELB"
+ state: present
+ connection_draining_timeout: 60
+ idle_timeout: 300
+ cross_az_load_balancing: "yes"
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+
+# Create an ELB with load balancer stickiness enabled
+- amazon.aws.ec2_elb_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: loadbalancer
+ enabled: yes
+ expiration: 300
+
+# Create an ELB with application stickiness enabled
+- amazon.aws.ec2_elb_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ stickiness:
+ type: application
+ enabled: yes
+ cookie: SESSIONID
+
+# Create an ELB and add tags
+- amazon.aws.ec2_elb_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags:
+ Name: "New ELB"
+ stack: "production"
+ client: "Bob"
+
+# Delete all tags from an ELB
+- amazon.aws.ec2_elb_lb:
+ name: "New ELB"
+ state: present
+ region: us-east-1
+ zones:
+ - us-east-1a
+ - us-east-1d
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ tags: {}
+"""
+
+import random
+import time
+
+try:
+ import boto
+ import boto.ec2.elb
+ import boto.ec2.elb.attributes
+ import boto.vpc
+ from boto.ec2.elb.healthcheck import HealthCheck
+ from boto.ec2.tag import Tag
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AnsibleAWSError
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import connect_to_aws
+from ..module_utils.ec2 import get_aws_connection_info
+
+
+def _throttleable_operation(max_retries):
+ def _operation_wrapper(op):
+ def _do_op(*args, **kwargs):
+ retry = 0
+ while True:
+ try:
+ return op(*args, **kwargs)
+ except boto.exception.BotoServerError as e:
+ if retry < max_retries and e.code in \
+ ("Throttling", "RequestLimitExceeded"):
+ retry = retry + 1
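+ # Jittered exponential backoff: sleep a random amount of up
+ # to 2**retry seconds (2s, 4s, 8s, ...), capped at 300 seconds.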
+ time.sleep(min(random.random() * (2 ** retry), 300))
+ continue
+ else:
+ raise
+ return _do_op
+ return _operation_wrapper
+
+
+def _get_vpc_connection(module, region, aws_connect_params):
+ try:
+ return connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json_aws(e, 'Failed to connect to AWS')
+
+
+_THROTTLING_RETRIES = 5
+
+
+class ElbManager(object):
+ """Handles ELB creation and destruction"""
+
+ def __init__(self, module, name, listeners=None, purge_listeners=None,
+ zones=None, purge_zones=None, security_group_ids=None,
+ health_check=None, subnets=None, purge_subnets=None,
+ scheme="internet-facing", connection_draining_timeout=None,
+ idle_timeout=None,
+ cross_az_load_balancing=None, access_logs=None,
+ stickiness=None, wait=None, wait_timeout=None, tags=None,
+ region=None,
+ instance_ids=None, purge_instance_ids=None, **aws_connect_params):
+
+ self.module = module
+ self.name = name
+ self.listeners = listeners
+ self.purge_listeners = purge_listeners
+ self.instance_ids = instance_ids
+ self.purge_instance_ids = purge_instance_ids
+ self.zones = zones
+ self.purge_zones = purge_zones
+ self.security_group_ids = security_group_ids
+ self.health_check = health_check
+ self.subnets = subnets
+ self.purge_subnets = purge_subnets
+ self.scheme = scheme
+ self.connection_draining_timeout = connection_draining_timeout
+ self.idle_timeout = idle_timeout
+ self.cross_az_load_balancing = cross_az_load_balancing
+ self.access_logs = access_logs
+ self.stickiness = stickiness
+ self.wait = wait
+ self.wait_timeout = wait_timeout
+ self.tags = tags
+
+ self.aws_connect_params = aws_connect_params
+ self.region = region
+
+ self.changed = False
+ self.status = 'gone'
+ self.elb_conn = self._get_elb_connection()
+
+ try:
+ self.elb = self._get_elb()
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e, msg='Unable to get all load balancers')
+
+ self.ec2_conn = self._get_ec2_connection()
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def ensure_ok(self):
+ """Create the ELB"""
+ if not self.elb:
+ # Zones and listeners will be added at creation
+ self._create_elb()
+ else:
+ if self._get_scheme():
+ # the only way to change the scheme is by recreating the resource
+ self.ensure_gone()
+ self._create_elb()
+ else:
+ self._set_zones()
+ self._set_security_groups()
+ self._set_elb_listeners()
+ self._set_subnets()
+ self._set_health_check()
+ # boto has introduced support for some ELB attributes in
+ # different versions, so we check first before trying to
+ # set them to avoid errors
+ if self._check_attribute_support('connection_draining'):
+ self._set_connection_draining_timeout()
+ if self._check_attribute_support('connecting_settings'):
+ self._set_idle_timeout()
+ if self._check_attribute_support('cross_zone_load_balancing'):
+ self._set_cross_az_load_balancing()
+ if self._check_attribute_support('access_log'):
+ self._set_access_log()
+ # add sticky options
+ self.select_stickiness_policy()
+
+ # ensure backend server policies are correct
+ self._set_backend_policies()
+ # set/remove instance ids
+ self._set_instance_ids()
+
+ self._set_tags()
+
+ def ensure_gone(self):
+ """Destroy the ELB"""
+ if self.elb:
+ self._delete_elb()
+ if self.wait:
+ elb_removed = self._wait_for_elb_removed()
+ # Unfortunately, even though the ELB itself is removed quickly,
+ # the interfaces take longer, so dependent security groups cannot
+ # be deleted until the interfaces have registered as removed.
+ elb_interface_removed = self._wait_for_elb_interface_removed()
+ if not (elb_removed and elb_interface_removed):
+ self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
+
+ def get_info(self):
+ try:
+ check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
+ except Exception:
+ check_elb = None
+
+ if not check_elb:
+ info = {
+ 'name': self.name,
+ 'status': self.status,
+ 'region': self.region
+ }
+ else:
+ try:
+ lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
+ except Exception:
+ lb_cookie_policy = None
+ try:
+ app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
+ except Exception:
+ app_cookie_policy = None
+
+ info = {
+ 'name': check_elb.name,
+ 'dns_name': check_elb.dns_name,
+ 'zones': check_elb.availability_zones,
+ 'security_group_ids': check_elb.security_groups,
+ 'status': self.status,
+ 'subnets': self.subnets,
+ 'scheme': check_elb.scheme,
+ 'hosted_zone_name': check_elb.canonical_hosted_zone_name,
+ 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
+ 'lb_cookie_policy': lb_cookie_policy,
+ 'app_cookie_policy': app_cookie_policy,
+ 'proxy_policy': self._get_proxy_protocol_policy(),
+ 'backends': self._get_backend_policies(),
+ 'instances': [instance.id for instance in check_elb.instances],
+ 'out_of_service_count': 0,
+ 'in_service_count': 0,
+ 'unknown_instance_state_count': 0,
+ 'region': self.region
+ }
+
+ # status of instances behind the ELB
+ if info['instances']:
+ info['instance_health'] = [dict(
+ instance_id=instance_state.instance_id,
+ reason_code=instance_state.reason_code,
+ state=instance_state.state
+ ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
+ else:
+ info['instance_health'] = []
+
+ # instance state counts: InService or OutOfService
+ if info['instance_health']:
+ for instance_state in info['instance_health']:
+ if instance_state['state'] == "InService":
+ info['in_service_count'] += 1
+ elif instance_state['state'] == "OutOfService":
+ info['out_of_service_count'] += 1
+ else:
+ info['unknown_instance_state_count'] += 1
+
+ if check_elb.health_check:
+ info['health_check'] = {
+ 'target': check_elb.health_check.target,
+ 'interval': check_elb.health_check.interval,
+ 'timeout': check_elb.health_check.timeout,
+ 'healthy_threshold': check_elb.health_check.healthy_threshold,
+ 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
+ }
+
+ if check_elb.listeners:
+ info['listeners'] = [self._api_listener_as_tuple(listener)
+ for listener in check_elb.listeners]
+ elif self.status == 'created':
+ # When creating a new ELB, listeners don't show in the
+ # immediately returned result, so just include the
+ # ones that were added
+ info['listeners'] = [self._listener_as_tuple(listener)
+ for listener in self.listeners]
+ else:
+ info['listeners'] = []
+
+ if self._check_attribute_support('connection_draining'):
+ info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
+
+ if self._check_attribute_support('connecting_settings'):
+ info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
+
+ if self._check_attribute_support('cross_zone_load_balancing'):
+ is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
+ if is_cross_az_lb_enabled:
+ info['cross_az_load_balancing'] = 'yes'
+ else:
+ info['cross_az_load_balancing'] = 'no'
+
+ # return stickiness info?
+
+ info['tags'] = self.tags
+
+ return info
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _wait_for_elb_removed(self):
+ polling_increment_secs = 15
+ max_retries = (self.wait_timeout // polling_increment_secs)
+ status_achieved = False
+
+ for x in range(0, max_retries):
+ try:
+ self.elb_conn.get_all_lb_attributes(self.name)
+ except (boto.exception.BotoServerError, Exception) as e:
+ if "LoadBalancerNotFound" in e.code:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+
+ return status_achieved
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _wait_for_elb_interface_removed(self):
+ polling_increment_secs = 15
+ max_retries = (self.wait_timeout // polling_increment_secs)
+ status_achieved = False
+
+ elb_interfaces = self.ec2_conn.get_all_network_interfaces(
+ filters={'attachment.instance-owner-id': 'amazon-elb',
+ 'description': 'ELB {0}'.format(self.name)})
+
+ for x in range(0, max_retries):
+ for interface in elb_interfaces:
+ try:
+ result = self.ec2_conn.get_all_network_interfaces(interface.id)
+ if result == []:
+ status_achieved = True
+ break
+ else:
+ time.sleep(polling_increment_secs)
+ except (boto.exception.BotoServerError, Exception) as e:
+ if 'InvalidNetworkInterfaceID' in e.code:
+ status_achieved = True
+ break
+ else:
+ self.module.fail_json_aws(e, 'Failure while waiting for interface to be removed')
+
+ return status_achieved
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _get_elb(self):
+ elbs = self.elb_conn.get_all_load_balancers()
+ for elb in elbs:
+ if self.name == elb.name:
+ self.status = 'ok'
+ return elb
+
+ def _get_elb_connection(self):
+ try:
+ return connect_to_aws(boto.ec2.elb, self.region,
+ **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ self.module.fail_json_aws(e, 'Failure while connecting to AWS')
+
+ def _get_ec2_connection(self):
+ try:
+ return connect_to_aws(boto.ec2, self.region,
+ **self.aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, Exception) as e:
+ self.module.fail_json_aws(e, 'Failure while connecting to AWS')
+
+ @_throttleable_operation(_THROTTLING_RETRIES)
+ def _delete_elb(self):
+ # True if succeeds, exception raised if not
+ result = self.elb_conn.delete_load_balancer(name=self.name)
+ if result:
+ self.changed = True
+ self.status = 'deleted'
+
+ def _create_elb(self):
+ listeners = [self._listener_as_tuple(listener) for listener in self.listeners]
+ self.elb = self.elb_conn.create_load_balancer(name=self.name,
+ zones=self.zones,
+ security_groups=self.security_group_ids,
+ complex_listeners=listeners,
+ subnets=self.subnets,
+ scheme=self.scheme)
+ if self.elb:
+ # HACK: Work around a boto bug in which the listeners attribute is
+ # always set to the listeners argument to create_load_balancer, and
+ # not the complex_listeners
+ # We're not doing a self.elb = self._get_elb here because there
+ # might be eventual consistency issues and it doesn't necessarily
+ # make sense to wait until the ELB gets returned from the EC2 API.
+ # This is necessary in the event we hit the throttling errors and
+ # need to retry ensure_ok
+ # See https://github.com/boto/boto/issues/3526
+ self.elb.listeners = self.listeners
+ self.changed = True
+ self.status = 'created'
+
+ def _create_elb_listeners(self, listeners):
+ """Takes a list of listener tuples and creates them"""
+ # True if succeeds, exception raised if not
+ self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
+ complex_listeners=listeners)
+
+ def _delete_elb_listeners(self, listeners):
+ """Takes a list of listener tuples and deletes them from the elb"""
+ ports = [listener[0] for listener in listeners]
+
+ # True if succeeds, exception raised if not
+ self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
+ ports)
+
+ def _set_elb_listeners(self):
+ """
+ Creates listeners specified by self.listeners; overwrites existing
+ listeners on these ports; removes extraneous listeners
+ """
+ listeners_to_add = []
+ listeners_to_remove = []
+ listeners_to_keep = []
+
+ # Check for any listeners we need to create or overwrite
+ for listener in self.listeners:
+ listener_as_tuple = self._listener_as_tuple(listener)
+
+ # First we loop through existing listeners to see if one is
+ # already specified for this port
+ existing_listener_found = None
+ for existing_listener in self.elb.listeners:
+ # Since ELB allows only one listener on each incoming port, a
+ # single match on the incoming port is all we're looking for
+ if existing_listener[0] == int(listener['load_balancer_port']):
+ existing_listener_found = self._api_listener_as_tuple(existing_listener)
+ break
+
+ if existing_listener_found:
+ # Does it match exactly?
+ if listener_as_tuple != existing_listener_found:
+ # The ports are the same but something else is different,
+ # so we'll remove the existing one and add the new one
+ listeners_to_remove.append(existing_listener_found)
+ listeners_to_add.append(listener_as_tuple)
+ else:
+ # We already have this listener, so we're going to keep it
+ listeners_to_keep.append(existing_listener_found)
+ else:
+ # We didn't find an existing listener, so just add the new one
+ listeners_to_add.append(listener_as_tuple)
+
+ # Check for any extraneous listeners we need to remove, if desired
+ if self.purge_listeners:
+ for existing_listener in self.elb.listeners:
+ existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
+ if existing_listener_tuple in listeners_to_remove:
+ # Already queued for removal
+ continue
+ if existing_listener_tuple in listeners_to_keep:
+ # Keep this one around
+ continue
+ # Since we're not already removing it and we don't need to keep
+ # it, let's get rid of it
+ listeners_to_remove.append(existing_listener_tuple)
+
+ if listeners_to_remove:
+ self._delete_elb_listeners(listeners_to_remove)
+
+ if listeners_to_add:
+ self._create_elb_listeners(listeners_to_add)
+
+ def _api_listener_as_tuple(self, listener):
+ """Adds ssl_certificate_id to ELB API tuple if present"""
+ base_tuple = listener.get_complex_tuple()
+ if listener.ssl_certificate_id and len(base_tuple) < 5:
+ return base_tuple + (listener.ssl_certificate_id,)
+ return base_tuple
+
+ def _listener_as_tuple(self, listener):
+ """Formats listener as a 4- or 5-tuples, in the order specified by the
+ ELB API"""
+ # N.B. string manipulations on protocols below (str(), upper()) is to
+ # ensure format matches output from ELB API
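+ # For example (illustrative values), {'load_balancer_port': 80,
+ # 'instance_port': 8080, 'protocol': 'http'} becomes (80, 8080, 'HTTP', 'HTTP').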
+ listener_list = [
+ int(listener['load_balancer_port']),
+ int(listener['instance_port']),
+ str(listener['protocol'].upper()),
+ ]
+
+ # Instance protocol is not required by ELB API; it defaults to match
+ # load balancer protocol. We'll mimic that behavior here
+ if 'instance_protocol' in listener:
+ listener_list.append(str(listener['instance_protocol'].upper()))
+ else:
+ listener_list.append(str(listener['protocol'].upper()))
+
+ if 'ssl_certificate_id' in listener:
+ listener_list.append(str(listener['ssl_certificate_id']))
+
+ return tuple(listener_list)
+
+ def _enable_zones(self, zones):
+ try:
+ self.elb.enable_zones(zones)
+ except boto.exception.BotoServerError as e:
+ self.module.fail_json_aws(e, msg='unable to enable zones')
+
+ self.changed = True
+
+ def _disable_zones(self, zones):
+ try:
+ self.elb.disable_zones(zones)
+ except boto.exception.BotoServerError as e:
+ self.module.fail_json_aws(e, msg='unable to disable zones')
+ self.changed = True
+
+ def _attach_subnets(self, subnets):
+ self.elb_conn.attach_lb_to_subnets(self.name, subnets)
+ self.changed = True
+
+ def _detach_subnets(self, subnets):
+ self.elb_conn.detach_lb_from_subnets(self.name, subnets)
+ self.changed = True
+
+ def _set_subnets(self):
+ """Determine which subnets need to be attached or detached on the ELB"""
+ if self.subnets:
+ if self.purge_subnets:
+ subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
+ subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+ else:
+ subnets_to_detach = None
+ subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
+
+ if subnets_to_attach:
+ self._attach_subnets(subnets_to_attach)
+ if subnets_to_detach:
+ self._detach_subnets(subnets_to_detach)
+
+ def _get_scheme(self):
+ """Determine if the current scheme is different than the scheme of the ELB"""
+ if self.scheme:
+ if self.elb.scheme != self.scheme:
+ if not self.wait:
+ self.module.fail_json(msg="Unable to modify scheme without using the wait option")
+ return True
+ return False
+
+ def _set_zones(self):
+ """Determine which zones need to be enabled or disabled on the ELB"""
+ if self.zones:
+ if self.purge_zones:
+ zones_to_disable = list(set(self.elb.availability_zones) -
+ set(self.zones))
+ zones_to_enable = list(set(self.zones) -
+ set(self.elb.availability_zones))
+ else:
+ zones_to_disable = None
+ zones_to_enable = list(set(self.zones) -
+ set(self.elb.availability_zones))
+ if zones_to_enable:
+ self._enable_zones(zones_to_enable)
+ # N.B. Disabling must come second; disabling first could have removed all zones
+ if zones_to_disable:
+ self._disable_zones(zones_to_disable)
+
+ def _set_security_groups(self):
+ if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
+ self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
+ self.changed = True
+
+ def _set_health_check(self):
+ """Set health check values on ELB as needed"""
+ if self.health_check:
+ # This just makes it easier to compare each of the attributes
+ # and look for changes. Keys are attributes of the current
+ # health_check; values are desired values of new health_check
+ health_check_config = {
+ "target": self._get_health_check_target(),
+ "timeout": self.health_check['response_timeout'],
+ "interval": self.health_check['interval'],
+ "unhealthy_threshold": self.health_check['unhealthy_threshold'],
+ "healthy_threshold": self.health_check['healthy_threshold'],
+ }
+
+ update_health_check = False
+
+ # The health_check attribute is *not* set on newly created
+ # ELBs! So we have to create our own.
+ if not self.elb.health_check:
+ self.elb.health_check = HealthCheck()
+
+ for attr, desired_value in health_check_config.items():
+ if getattr(self.elb.health_check, attr) != desired_value:
+ setattr(self.elb.health_check, attr, desired_value)
+ update_health_check = True
+
+ if update_health_check:
+ self.elb.configure_health_check(self.elb.health_check)
+ self.changed = True
+
+ def _check_attribute_support(self, attr):
+ return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
+
+ def _set_cross_az_load_balancing(self):
+ attributes = self.elb.get_attributes()
+ if self.cross_az_load_balancing:
+ if not attributes.cross_zone_load_balancing.enabled:
+ self.changed = True
+ attributes.cross_zone_load_balancing.enabled = True
+ else:
+ if attributes.cross_zone_load_balancing.enabled:
+ self.changed = True
+ attributes.cross_zone_load_balancing.enabled = False
+ self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
+ attributes.cross_zone_load_balancing.enabled)
+
+ def _set_access_log(self):
+ attributes = self.elb.get_attributes()
+ if self.access_logs:
+ if 's3_location' not in self.access_logs:
+ self.module.fail_json(msg='s3_location information required')
+
+ access_logs_config = {
+ "enabled": True,
+ "s3_bucket_name": self.access_logs['s3_location'],
+ "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
+ "emit_interval": self.access_logs.get('interval', 60),
+ }
+
+ update_access_logs_config = False
+ for attr, desired_value in access_logs_config.items():
+ if getattr(attributes.access_log, attr) != desired_value:
+ setattr(attributes.access_log, attr, desired_value)
+ update_access_logs_config = True
+ if update_access_logs_config:
+ self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
+ self.changed = True
+ elif attributes.access_log.enabled:
+ attributes.access_log.enabled = False
+ self.changed = True
+ self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
+
+ def _set_connection_draining_timeout(self):
+ attributes = self.elb.get_attributes()
+ if self.connection_draining_timeout is not None:
+ if not attributes.connection_draining.enabled or \
+ attributes.connection_draining.timeout != self.connection_draining_timeout:
+ self.changed = True
+ attributes.connection_draining.enabled = True
+ attributes.connection_draining.timeout = self.connection_draining_timeout
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
+ else:
+ if attributes.connection_draining.enabled:
+ self.changed = True
+ attributes.connection_draining.enabled = False
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
+
+ def _set_idle_timeout(self):
+ attributes = self.elb.get_attributes()
+ if self.idle_timeout is not None:
+ if attributes.connecting_settings.idle_timeout != self.idle_timeout:
+ self.changed = True
+ attributes.connecting_settings.idle_timeout = self.idle_timeout
+ self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
+
+ def _policy_name(self, policy_type):
+ return 'ec2-elb-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict'))
+
+ def _create_policy(self, policy_param, policy_meth, policy):
+ getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
+
+ def _delete_policy(self, elb_name, policy):
+ self.elb_conn.delete_lb_policy(elb_name, policy)
+
+ def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
+ self._delete_policy(self.elb.name, policy)
+ self._create_policy(policy_param, policy_meth, policy)
+
+ def _set_listener_policy(self, listeners_dict, policy=None):
+ policy = [] if policy is None else policy
+
+ for listener_port in listeners_dict:
+ if listeners_dict[listener_port].startswith('HTTP'):
+ self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
+
+ def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
+ for p in getattr(elb_info.policies, policy_attrs['attr']):
+ if str(p.__dict__['policy_name']) == str(policy[0]):
+ if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
+ self._set_listener_policy(listeners_dict)
+ self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
+ self.changed = True
+ break
+ else:
+ self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
+ self.changed = True
+
+ self._set_listener_policy(listeners_dict, policy)
+
+ def select_stickiness_policy(self):
+ if self.stickiness:
+
+ if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
+ self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
+
+ elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
+ d = {}
+ for listener in elb_info.listeners:
+ d[listener[0]] = listener[2]
+ listeners_dict = d
+
+ if self.stickiness['type'] == 'loadbalancer':
+ policy = []
+ policy_type = 'LBCookieStickinessPolicyType'
+
+ if self.module.boolean(self.stickiness['enabled']):
+
+ if 'expiration' not in self.stickiness:
+ self.module.fail_json(msg='expiration must be set when type is loadbalancer')
+
+ try:
+ expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
+ except ValueError:
+ self.module.fail_json(msg='expiration must be set to an integer')
+
+ policy_attrs = {
+ 'type': policy_type,
+ 'attr': 'lb_cookie_stickiness_policies',
+ 'method': 'create_lb_cookie_stickiness_policy',
+ 'dict_key': 'cookie_expiration_period',
+ 'param_value': expiration
+ }
+ policy.append(self._policy_name(policy_attrs['type']))
+
+ self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
+ elif not self.module.boolean(self.stickiness['enabled']):
+ if len(elb_info.policies.lb_cookie_stickiness_policies):
+ if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
+ self.changed = True
+ else:
+ self.changed = False
+ self._set_listener_policy(listeners_dict)
+ self._delete_policy(self.elb.name, self._policy_name(policy_type))
+
+ elif self.stickiness['type'] == 'application':
+ policy = []
+ policy_type = 'AppCookieStickinessPolicyType'
+ if self.module.boolean(self.stickiness['enabled']):
+
+ if 'cookie' not in self.stickiness:
+ self.module.fail_json(msg='cookie must be set when type is application')
+
+ policy_attrs = {
+ 'type': policy_type,
+ 'attr': 'app_cookie_stickiness_policies',
+ 'method': 'create_app_cookie_stickiness_policy',
+ 'dict_key': 'cookie_name',
+ 'param_value': self.stickiness['cookie']
+ }
+ policy.append(self._policy_name(policy_attrs['type']))
+ self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
+ elif not self.module.boolean(self.stickiness['enabled']):
+ if len(elb_info.policies.app_cookie_stickiness_policies):
+ if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
+ self.changed = True
+ self._set_listener_policy(listeners_dict)
+ self._delete_policy(self.elb.name, self._policy_name(policy_type))
+
+ else:
+ self._set_listener_policy(listeners_dict)
+
+ def _get_backend_policies(self):
+ """Get a list of backend policies"""
+ policies = []
+ if self.elb.backends is not None:
+ for backend in self.elb.backends:
+ if backend.policies is not None:
+ for policy in backend.policies:
+ policies.append(str(backend.instance_port) + ':' + policy.policy_name)
+
+ return policies
+
+ def _set_backend_policies(self):
+ """Sets policies for all backends"""
+ ensure_proxy_protocol = False
+ replace = []
+ backend_policies = self._get_backend_policies()
+
+ # Find out what needs to be changed
+ for listener in self.listeners:
+ want = False
+
+ if 'proxy_protocol' in listener and listener['proxy_protocol']:
+ ensure_proxy_protocol = True
+ want = True
+
+ if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
+ if not want:
+ replace.append({'port': listener['instance_port'], 'policies': []})
+ elif want:
+ replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
+
+ # enable or disable proxy protocol
+ if ensure_proxy_protocol:
+ self._set_proxy_protocol_policy()
+
+ # Make the backend policies so
+ for item in replace:
+ self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
+ self.changed = True
+
+ def _get_proxy_protocol_policy(self):
+ """Find out if the elb has a proxy protocol enabled"""
+ if self.elb.policies is not None and self.elb.policies.other_policies is not None:
+ for policy in self.elb.policies.other_policies:
+ if policy.policy_name == 'ProxyProtocol-policy':
+ return policy.policy_name
+
+ return None
+
+ def _set_proxy_protocol_policy(self):
+ """Install a proxy protocol policy if needed"""
+ proxy_policy = self._get_proxy_protocol_policy()
+
+ if proxy_policy is None:
+ self.elb_conn.create_lb_policy(
+ self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
+ )
+ self.changed = True
+
+ # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
+
+ def _diff_list(self, a, b):
+ """Find the entries in list a that are not in list b"""
+ b = set(b)
+ return [aa for aa in a if aa not in b]
+
+ def _get_instance_ids(self):
+ """Get the current list of instance ids installed in the elb"""
+ instances = []
+ if self.elb.instances is not None:
+ for instance in self.elb.instances:
+ instances.append(instance.id)
+
+ return instances
+
+ def _set_instance_ids(self):
+ """Register or deregister instances from an lb instance"""
+ assert_instances = self.instance_ids or []
+
+ has_instances = self._get_instance_ids()
+
+ add_instances = self._diff_list(assert_instances, has_instances)
+ if add_instances:
+ self.elb_conn.register_instances(self.elb.name, add_instances)
+ self.changed = True
+
+ if self.purge_instance_ids:
+ remove_instances = self._diff_list(has_instances, assert_instances)
+ if remove_instances:
+ self.elb_conn.deregister_instances(self.elb.name, remove_instances)
+ self.changed = True
+
+ def _set_tags(self):
+ """Add/Delete tags"""
+ if self.tags is None:
+ return
+
+ params = {'LoadBalancerNames.member.1': self.name}
+
+ tagdict = dict()
+
+ # get the current list of tags from the ELB, if ELB exists
+ if self.elb:
+ current_tags = self.elb_conn.get_list('DescribeTags', params,
+ [('member', Tag)])
+ tagdict = dict((tag.Key, tag.Value) for tag in current_tags
+ if hasattr(tag, 'Key'))
+
+ # Add missing tags
+ dictact = dict(set(self.tags.items()) - set(tagdict.items()))
+ if dictact:
+ for i, key in enumerate(dictact):
+ params['Tags.member.%d.Key' % (i + 1)] = key
+ params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
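+ # Illustrative resulting request parameters:
+ # {'LoadBalancerNames.member.1': 'my-elb',
+ # 'Tags.member.1.Key': 'stack', 'Tags.member.1.Value': 'production'}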
+
+ self.elb_conn.make_request('AddTags', params)
+ self.changed = True
+
+ # Remove extra tags
+ dictact = dict(set(tagdict.items()) - set(self.tags.items()))
+ if dictact:
+ for i, key in enumerate(dictact):
+ params['Tags.member.%d.Key' % (i + 1)] = key
+
+ self.elb_conn.make_request('RemoveTags', params)
+ self.changed = True
+
+ def _get_health_check_target(self):
+ """Compose target string from healthcheck parameters"""
+ protocol = self.health_check['ping_protocol'].upper()
+ path = ""
+
+ if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+ path = self.health_check['ping_path']
+
+ return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+
+def main():
+ argument_spec = dict(
+ state={'required': True, 'choices': ['present', 'absent']},
+ name={'required': True},
+ listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'},
+ purge_listeners={'default': True, 'required': False, 'type': 'bool'},
+ instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
+ zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_zones={'default': False, 'required': False, 'type': 'bool'},
+ security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ health_check={'default': None, 'required': False, 'type': 'dict'},
+ subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
+ purge_subnets={'default': False, 'required': False, 'type': 'bool'},
+ scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
+ connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
+ idle_timeout={'default': None, 'type': 'int', 'required': False},
+ cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
+ stickiness={'default': None, 'required': False, 'type': 'dict'},
+ access_logs={'default': None, 'required': False, 'type': 'dict'},
+ wait={'default': False, 'type': 'bool', 'required': False},
+ wait_timeout={'default': 60, 'type': 'int', 'required': False},
+ tags={'default': None, 'required': False, 'type': 'dict'}
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ check_boto3=False,
+ mutually_exclusive=[['security_group_ids', 'security_group_names']]
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+
+ name = module.params['name']
+ state = module.params['state']
+ listeners = module.params['listeners']
+ purge_listeners = module.params['purge_listeners']
+ instance_ids = module.params['instance_ids']
+ purge_instance_ids = module.params['purge_instance_ids']
+ zones = module.params['zones']
+ purge_zones = module.params['purge_zones']
+ security_group_ids = module.params['security_group_ids']
+ security_group_names = module.params['security_group_names']
+ health_check = module.params['health_check']
+ access_logs = module.params['access_logs']
+ subnets = module.params['subnets']
+ purge_subnets = module.params['purge_subnets']
+ scheme = module.params['scheme']
+ connection_draining_timeout = module.params['connection_draining_timeout']
+ idle_timeout = module.params['idle_timeout']
+ cross_az_load_balancing = module.params['cross_az_load_balancing']
+ stickiness = module.params['stickiness']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout']
+ tags = module.params['tags']
+
+ if state == 'present' and not listeners:
+ module.fail_json(msg="At least one listener is required for ELB creation")
+
+ if state == 'present' and not (zones or subnets):
+ module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
+
+ if wait_timeout > 600:
+ module.fail_json(msg='wait_timeout maximum is 600 seconds')
+
+ if security_group_names:
+ security_group_ids = []
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ if subnets: # We have at least one subnet, ergo this is a VPC
+ vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
+ vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
+ filters = {'vpc_id': vpc_id}
+ else:
+ filters = None
+ grp_details = ec2.get_all_security_groups(filters=filters)
+
+ for group_name in security_group_names:
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ security_group_ids.extend(group_id)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json_aws(e)
+
+ elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
+ purge_zones, security_group_ids, health_check,
+ subnets, purge_subnets, scheme,
+ connection_draining_timeout, idle_timeout,
+ cross_az_load_balancing,
+ access_logs, stickiness, wait, wait_timeout, tags,
+ region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
+ **aws_connect_params)
+
+ # check for unsupported attributes for this version of boto
+ if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
+ module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
+
+ if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
+ module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
+
+ if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
+ module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
+
+ if state == 'present':
+ elb_man.ensure_ok()
+ elif state == 'absent':
+ elb_man.ensure_gone()
+
+ ansible_facts = {'ec2_elb': 'info'}
+ ec2_facts_result = dict(changed=elb_man.changed,
+ elb=elb_man.get_info(),
+ ansible_facts=ansible_facts)
+
+ module.exit_json(**ec2_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
new file mode 100644
index 00000000..01a81f99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
@@ -0,0 +1,882 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+version_added: 1.0.0
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private IP address is
+ provided, the existing ENI (if any) will be modified. The I(attached) parameter controls the attachment status
+ of the network interface.
+author:
+ - "Rob White (@wimnat)"
+ - "Mike Healey (@healem)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI (to modify).
+ - If I(eni_id=None) and I(state=present), a new eni will be created.
+ type: str
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to.
+ - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
+ type: str
+ private_ip_address:
+ description:
+ - Private IP address.
+ type: str
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI.
+ type: str
+ description:
+ description:
+ - Optional description of the ENI.
+ type: str
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when I(state=present).
+ - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete ENI.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ default: 0
+ type: int
+ attached:
+ description:
+ - Specifies whether the network interface should be attached to or detached from the instance. If omitted,
+ the attachment status won't change.
+ type: bool
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
+ or when deleting an interface with I(state=absent).
+ default: false
+ type: bool
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
+ interface is being modified, not on creation.
+ required: false
+ type: bool
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
+ You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ type: bool
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface.
+ This option is mutually exclusive with I(secondary_private_ip_address_count).
+ required: false
+ type: list
+ elements: str
+ purge_secondary_private_ip_addresses:
+ description:
+ - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
+ - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
+ default: false
+ type: bool
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of I(secondary_private_ip_addresses)
+ required: false
+ type: int
+ allow_reassignment:
+ description:
+ - Indicates whether to allow an IP address that is already assigned to another network interface or instance
+ to be reassigned to the specified network interface.
+ required: false
+ default: false
+ type: bool
+ name:
+ description:
+ - Name for the ENI. This will create a tag called "Name" with the value assigned here.
+ - This can be used in conjunction with I(subnet_id) as another means of identifying a network interface.
+ - AWS does not enforce unique Name tags, so duplicate names are possible if you configure it that way.
+ If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id).
+ required: false
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new ENI or to add/remove from an existing one. Please note that
+ the name field sets the "Name" tag.
+ - To clear all tags, set this option to an empty dictionary in conjunction with I(purge_tags).
+ If you provide I(name), that tag will not be removed.
+ - To prevent removing any tags set I(purge_tags) to false.
+ type: dict
+ required: false
+ version_added: 1.3.0
+ purge_tags:
+ description:
+ - Indicates whether to remove tags not specified in I(tags) or I(name). This means you have to specify all
+ the desired tags on each task affecting a network interface.
+ - If I(tags) is omitted or None this option is disregarded.
+ default: true
+ type: bool
+ version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+ - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
+ a combination of I(instance_id) and I(device_index), or a combination of I(name) and I(subnet_id). Any of these options will let you specify a particular ENI.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- amazon.aws.ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- amazon.aws.ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# (set purge_secondary_private_ip_addresses: true to also remove any other secondary IPs)
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_address_count: 0
+
+# Destroy an ENI, detaching it from any instance if necessary
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: true
+ state: absent
+
+# Update an ENI
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI using name and subnet_id
+- amazon.aws.ec2_eni:
+ name: eni-20
+ subnet_id: subnet-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI identifying it by private_ip_address and subnet_id
+- amazon.aws.ec2_eni:
+ subnet_id: subnet-xxxxxxx
+ private_ip_address: 172.16.1.1
+ description: "My new description"
+
+# Detach an ENI from an instance
+- amazon.aws.ec2_eni:
+ eni_id: eni-xxxxxxx
+ attached: false
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- amazon.aws.ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- amazon.aws.ec2_eni:
+ eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
+
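+# Update an ENI identifying it by instance_id and device_index, as described in
+# the notes above (the IDs here are placeholders)
+- amazon.aws.ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ description: "My new description"
+ state: present
+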
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ description:
+ description: interface description
+ type: str
+ sample: Firewall network interface
+ groups:
+ description: list of security groups
+ type: list
+ elements: dict
+ sample: [ { "sg-f8a8a9da": "default" } ]
+ id:
+ description: network interface id
+ type: str
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: str
+ sample: "00:00:5E:00:53:23"
+ name:
+ description: The name of the ENI
+ type: str
+ sample: "my-eni-20"
+ owner_id:
+ description: aws account id
+ type: str
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: str
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list
+ elements: dict
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: bool
+ sample: True
+ status:
+ description: network interface status
+ type: str
+ sample: "pending"
+ subnet_id:
+ description: which VPC subnet the interface is bound to
+ type: str
+ sample: subnet-b0a0393c
+ tags:
+ description: The dictionary of tags associated with the ENI
+ type: dict
+ sample: { "Name": "my-eni", "group": "Finance" }
+ vpc_id:
+ description: which VPC this network interface is bound to
+ type: str
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+
+try:
+ import botocore.exceptions
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.waiters import get_waiter
+
+
+def get_eni_info(interface):
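+ # Flatten the boto3 NetworkInterface dict into the snake_case structure
+ # documented in RETURN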
+
+ # Private addresses
+ private_addresses = []
+ if "PrivateIpAddresses" in interface:
+ for ip in interface["PrivateIpAddresses"]:
+ private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]})
+
+ groups = {}
+ if "Groups" in interface:
+ for group in interface["Groups"]:
+ groups[group["GroupId"]] = group["GroupName"]
+
+ interface_info = {'id': interface.get("NetworkInterfaceId"),
+ 'subnet_id': interface.get("SubnetId"),
+ 'vpc_id': interface.get("VpcId"),
+ 'description': interface.get("Description"),
+ 'owner_id': interface.get("OwnerId"),
+ 'status': interface.get("Status"),
+ 'mac_address': interface.get("MacAddress"),
+ 'private_ip_address': interface.get("PrivateIpAddress"),
+ 'source_dest_check': interface.get("SourceDestCheck"),
+ 'groups': groups,
+ 'private_ip_addresses': private_addresses
+ }
+
+ if "TagSet" in interface:
+ tags = {}
+ name = None
+ for tag in interface["TagSet"]:
+ tags[tag["Key"]] = tag["Value"]
+ if tag["Key"] == "Name":
+ name = tag["Value"]
+ interface_info["tags"] = tags
+
+ if name is not None:
+ interface_info["name"] = name
+
+ if "Attachment" in interface:
+ interface_info['attachment'] = {
+ 'attachment_id': interface["Attachment"].get("AttachmentId"),
+ 'instance_id': interface["Attachment"].get("InstanceId"),
+ 'device_index': interface["Attachment"].get("DeviceIndex"),
+ 'status': interface["Attachment"].get("Status"),
+ 'attach_time': interface["Attachment"].get("AttachTime"),
+ 'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"),
+ }
+
+ return interface_info
+
+
+def correct_ips(connection, ip_list, module, eni_id):
+ # True once every address in ip_list is present on the ENI
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ return set(ip_list).issubset(private_addresses)
+
+
+def absent_ips(connection, ip_list, module, eni_id):
+ # True once none of the addresses in ip_list remain on the ENI
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ # intersection (not union): we want no overlap with the remaining addresses
+ return not set(ip_list).intersection(private_addresses)
+
+
+def correct_ip_count(connection, ip_count, module, eni_id):
+ eni = describe_eni(connection, module, eni_id)
+ private_addresses = set()
+ if "PrivateIpAddresses" in eni:
+ for ip in eni["PrivateIpAddresses"]:
+ private_addresses.add(ip["PrivateIpAddress"])
+
+ return len(private_addresses) == ip_count
+
+
+def wait_for(function_pointer, *args):
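+ # Poll function_pointer(*args) every 3 seconds for up to 30 seconds,
+ # returning once it reports success (or the window elapses)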
+ max_wait = 30
+ interval_time = 3
+ current_wait = 0
+ while current_wait < max_wait:
+ time.sleep(interval_time)
+ current_wait += interval_time
+ if function_pointer(*args):
+ break
+
+
+def create_eni(connection, vpc_id, module):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ if instance_id == 'None':
+ instance_id = None
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = get_ec2_security_group_ids_from_names(
+ module.params.get('security_groups'),
+ connection,
+ vpc_id=vpc_id,
+ boto3=True
+ )
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ changed = False
+ tags = module.params.get("tags")
+ name = module.params.get("name")
+ purge_tags = module.params.get("purge_tags")
+
+ try:
+ args = {"SubnetId": subnet_id}
+ if private_ip_address:
+ args["PrivateIpAddress"] = private_ip_address
+ if description:
+ args["Description"] = description
+ if len(security_groups) > 0:
+ args["Groups"] = security_groups
+ eni_dict = connection.create_network_interface(aws_retry=True, **args)
+ eni = eni_dict["NetworkInterface"]
+ # Once we have an ID make sure we're always modifying the same object
+ eni_id = eni["NetworkInterfaceId"]
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+
+ if attached and instance_id is not None:
+ try:
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni["NetworkInterfaceId"]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+
+ if secondary_private_ip_address_count is not None:
+ try:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni["NetworkInterfaceId"],
+ SecondaryPrivateIpAddressCount=secondary_private_ip_address_count
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+
+ if secondary_private_ip_addresses is not None:
+ try:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni["NetworkInterfaceId"],
+ PrivateIpAddresses=secondary_private_ip_addresses
+ )
+ wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ raise
+
+ manage_tags(eni, name, tags, purge_tags, connection)
+
+ # Refresh the eni data
+ eni = describe_eni(connection, module, eni_id)
+ changed = True
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(
+ e,
+ "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address)
+ )
+
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def modify_eni(connection, module, eni):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ device_index = module.params.get("device_index")
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ force_detach = module.params.get("force_detach")
+ source_dest_check = module.params.get("source_dest_check")
+ delete_on_termination = module.params.get("delete_on_termination")
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ allow_reassignment = module.params.get("allow_reassignment")
+ changed = False
+ tags = module.params.get("tags")
+ name = module.params.get("name")
+ purge_tags = module.params.get("purge_tags")
+
+ eni = uniquely_find_eni(connection, module, eni)
+ eni_id = eni["NetworkInterfaceId"]
+
+ try:
+ if description is not None:
+ if "Description" not in eni or eni["Description"] != description:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Description={'Value': description}
+ )
+ changed = True
+ if len(security_groups) > 0:
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True)
+ if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups):
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Groups=groups
+ )
+ changed = True
+ if source_dest_check is not None:
+ if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ SourceDestCheck={'Value': source_dest_check}
+ )
+ changed = True
+ if delete_on_termination is not None and "Attachment" in eni:
+ if eni["Attachment"]["DeleteOnTermination"] is not delete_on_termination:
+ connection.modify_network_interface_attribute(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"],
+ 'DeleteOnTermination': delete_on_termination}
+ )
+ changed = True
+ if delete_on_termination:
+ waiter = "network_interface_delete_on_terminate"
+ else:
+ waiter = "network_interface_no_delete_on_terminate"
+ get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id])
+
+ current_secondary_addresses = []
+ if "PrivateIpAddresses" in eni:
+ current_secondary_addresses = [i["PrivateIpAddress"] for i in eni["PrivateIpAddresses"] if not i["Primary"]]
+
+ if secondary_private_ip_addresses is not None:
+ secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
+ if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
+ connection.unassign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ PrivateIpAddresses=secondary_addresses_to_remove,
+ )
+ wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id)
+ changed = True
+ secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
+ if secondary_addresses_to_add:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ PrivateIpAddresses=secondary_addresses_to_add,
+ AllowReassignment=allow_reassignment
+ )
+ wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id)
+ changed = True
+
+ if secondary_private_ip_address_count is not None:
+ current_secondary_address_count = len(current_secondary_addresses)
+ if secondary_private_ip_address_count > current_secondary_address_count:
+ connection.assign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count),
+ AllowReassignment=allow_reassignment
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ changed = True
+ elif secondary_private_ip_address_count < current_secondary_address_count:
+ # How many of these addresses do we want to remove
+ secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
+ connection.unassign_private_ip_addresses(
+ aws_retry=True,
+ NetworkInterfaceId=eni_id,
+ PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count]
+ )
+ wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)
+ changed = True
+
+ if attached is True:
+ if "Attachment" in eni and eni["Attachment"]["InstanceId"] != instance_id:
+ detach_eni(connection, eni, module)
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni_id,
+ )
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+ changed = True
+ if "Attachment" not in eni:
+ connection.attach_network_interface(
+ aws_retry=True,
+ InstanceId=instance_id,
+ DeviceIndex=device_index,
+ NetworkInterfaceId=eni_id,
+ )
+ get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])
+ changed = True
+
+ elif attached is False:
+ changed |= detach_eni(connection, eni, module)
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+
+ changed |= manage_tags(eni, name, tags, purge_tags, connection)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id))
+
+ eni = describe_eni(connection, module, eni_id)
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def delete_eni(connection, module):
+
+ eni = uniquely_find_eni(connection, module)
+ if not eni:
+ module.exit_json(changed=False)
+
+ eni_id = eni["NetworkInterfaceId"]
+ force_detach = module.params.get("force_detach")
+
+ try:
+ if force_detach is True:
+ if "Attachment" in eni:
+ connection.detach_network_interface(
+ aws_retry=True,
+ AttachmentId=eni["Attachment"]["AttachmentId"],
+ Force=True
+ )
+ # Wait to allow detachment to finish
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ changed = True
+ else:
+ connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)
+ changed = True
+
+ module.exit_json(changed=changed)
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id))
+
+
+def detach_eni(connection, eni, module):
+
+ attached = module.params.get("attached")
+ eni_id = eni["NetworkInterfaceId"]
+
+ force_detach = module.params.get("force_detach")
+ if "Attachment" in eni:
+ connection.detach_network_interface(
+ aws_retry=True,
+ AttachmentId=eni["Attachment"]["AttachmentId"],
+ Force=force_detach
+ )
+ get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])
+ return True
+
+ return False
+
+
+def describe_eni(connection, module, eni_id):
+ try:
+ eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])
+ if eni_result["NetworkInterfaces"]:
+ return eni_result["NetworkInterfaces"][0]
+ else:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id))
+
+
+def uniquely_find_eni(connection, module, eni=None):
+
+ if eni:
+ # In the case of create, eni_id will not be a param but we can still get the eni_id after creation
+ if "NetworkInterfaceId" in eni:
+ eni_id = eni["NetworkInterfaceId"]
+ else:
+ eni_id = None
+ else:
+ eni_id = module.params.get("eni_id")
+
+ private_ip_address = module.params.get('private_ip_address')
+ subnet_id = module.params.get('subnet_id')
+ instance_id = module.params.get('instance_id')
+ device_index = module.params.get('device_index')
+ attached = module.params.get('attached')
+ name = module.params.get("name")
+
+ filters = []
+
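+ # Filter precedence: eni_id, then private_ip_address+subnet_id, then
+ # instance_id+device_index, then name+subnet_id - the first match wins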
+ if eni_id:
+ filters.append({'Name': 'network-interface-id',
+ 'Values': [eni_id]})
+
+ if private_ip_address and subnet_id and not filters:
+ filters.append({'Name': 'private-ip-address',
+ 'Values': [private_ip_address]})
+ filters.append({'Name': 'subnet-id',
+ 'Values': [subnet_id]})
+
+ if not attached and instance_id and device_index and not filters:
+ filters.append({'Name': 'attachment.instance-id',
+ 'Values': [instance_id]})
+ filters.append({'Name': 'attachment.device-index',
+ 'Values': [str(device_index)]})
+
+ if name and subnet_id and not filters:
+ filters.append({'Name': 'tag:Name',
+ 'Values': [name]})
+ filters.append({'Name': 'subnet-id',
+ 'Values': [subnet_id]})
+
+ if not filters:
+ return None
+
+ try:
+ eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)["NetworkInterfaces"]
+ if len(eni_result) == 1:
+ return eni_result[0]
+ else:
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters))
+
+ return None
+
+
+def get_sec_group_list(groups):
+
+ # Build list of remote security group IDs
+ remote_security_groups = []
+ for group in groups:
+ remote_security_groups.append(group["GroupId"])
+
+ return remote_security_groups
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
+ return subnets["Subnets"][0]["VpcId"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id))
+
+
+def manage_tags(eni, name, new_tags, purge_tags, connection):
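+ # Reconcile the ENI's tags (including the Name tag derived from I(name)) with
+ # the requested set; returns True if any tag was created or deleted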
+ changed = False
+
+ if "TagSet" in eni:
+ old_tags = boto3_tag_list_to_ansible_dict(eni['TagSet'])
+ elif new_tags:
+ old_tags = {}
+ else:
+ # No new tags and nothing in TagSet
+ return False
+
+ # Do not purge tags unless tags is not None
+ if new_tags is None:
+ purge_tags = False
+ new_tags = {}
+
+ if name:
+ new_tags['Name'] = name
+
+ tags_to_set, tags_to_delete = compare_aws_tags(
+ old_tags, new_tags,
+ purge_tags=purge_tags,
+ )
+ if tags_to_set:
+ connection.create_tags(
+ aws_retry=True,
+ Resources=[eni['NetworkInterfaceId']],
+ Tags=ansible_dict_to_boto3_tag_list(tags_to_set))
+ changed |= True
+ if tags_to_delete:
+ delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)
+ connection.delete_tags(
+ aws_retry=True,
+ Resources=[eni['NetworkInterfaceId']],
+ Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))
+ changed |= True
+ return changed
+
+
+def main():
+ argument_spec = dict(
+ eni_id=dict(default=None, type='str'),
+ instance_id=dict(default=None, type='str'),
+ private_ip_address=dict(type='str'),
+ subnet_id=dict(type='str'),
+ description=dict(type='str'),
+ security_groups=dict(default=[], type='list', elements='str'),
+ device_index=dict(default=0, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ force_detach=dict(default=False, type='bool'),
+ source_dest_check=dict(default=None, type='bool'),
+ delete_on_termination=dict(default=None, type='bool'),
+ secondary_private_ip_addresses=dict(default=None, type='list', elements='str'),
+ purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
+ secondary_private_ip_address_count=dict(default=None, type='int'),
+ allow_reassignment=dict(default=False, type='bool'),
+ attached=dict(default=None, type='bool'),
+ name=dict(default=None, type='str'),
+ tags=dict(type='dict'),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
+ ],
+ required_if=([
+ ('attached', True, ['instance_id']),
+ ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
+ ])
+ )
+
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=['IncorrectState'],
+ )
+ connection = module.client('ec2', retry_decorator=retry_decorator)
+ state = module.params.get("state")
+
+ if state == 'present':
+ eni = uniquely_find_eni(connection, module)
+ if eni is None:
+ subnet_id = module.params.get("subnet_id")
+ if subnet_id is None:
+ module.fail_json(msg='subnet_id is required when creating a new ENI')
+
+ vpc_id = _get_vpc_id(connection, module, subnet_id)
+ create_eni(connection, vpc_id, module)
+ else:
+ modify_eni(connection, module, eni)
+
+ elif state == 'absent':
+ delete_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py
new file mode 100644
index 00000000..4741dfbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_facts.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+version_added: 1.0.0
+short_description: Gather information about ec2 ENI interfaces in AWS
+description:
+ - Gather information about ec2 ENI interfaces in AWS.
+ - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements: [ boto3 ]
+options:
+ eni_id:
+ description:
+ - The ID of the ENI.
+ - This option is mutually exclusive of I(filters).
+ type: str
+ version_added: 1.3.0
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ - This option is mutually exclusive of I(eni_id).
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all ENIs
+- amazon.aws.ec2_eni_info:
+
+# Gather information about a particular ENI
+- amazon.aws.ec2_eni_info:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
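+# Gather information about a particular ENI using the eni_id option
+# (mutually exclusive with filters; the ID here is a placeholder)
+- amazon.aws.ec2_eni_info:
+ eni_id: eni-xxxxxxx
+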
+'''
+
+RETURN = '''
+network_interfaces:
+ description: List of matching elastic network interfaces
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: Info of associated elastic IP (EIP)
+ returned: When an ENI is associated with an EIP
+ type: dict
+ sample: {
+ allocation_id: "eipalloc-5sdf123",
+ association_id: "eipassoc-8sdf123",
+ ip_owner_id: "4415120123456",
+ public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
+ public_ip: "52.1.0.63"
+ }
+ attachment:
+ description: Info about attached ec2 instance
+ returned: When an ENI is attached to an ec2 instance
+ type: dict
+ sample: {
+ attach_time: "2017-08-05T15:25:47+00:00",
+ attachment_id: "eni-attach-149d21234",
+ delete_on_termination: false,
+ device_index: 1,
+ instance_id: "i-15b8d3cadbafa1234",
+ instance_owner_id: "4415120123456",
+ status: "attached"
+ }
+ availability_zone:
+ description: Availability zone of ENI
+ returned: always
+ type: str
+ sample: "us-east-1b"
+ description:
+ description: Description text for ENI
+ returned: always
+ type: str
+ sample: "My favourite network interface"
+ groups:
+ description: List of attached security groups
+ returned: always
+ type: list
+ sample: [
+ {
+ group_id: "sg-26d0f1234",
+ group_name: "my_ec2_security_group"
+ }
+ ]
+ id:
+ description: The id of the ENI (alias for network_interface_id)
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ interface_type:
+ description: Type of the network interface
+ returned: always
+ type: str
+ sample: "interface"
+ ipv6_addresses:
+ description: List of IPv6 addresses for this interface
+ returned: always
+ type: list
+ sample: []
+ mac_address:
+ description: MAC address of the network interface
+ returned: always
+ type: str
+ sample: "0a:f8:10:2f:ab:a1"
+ name:
+ description: The Name tag of the ENI, often displayed in the AWS UIs as Name
+ returned: When a Name tag has been set
+ type: str
+ version_added: 1.3.0
+ network_interface_id:
+ description: The id of the ENI
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ owner_id:
+ description: AWS account id of the owner of the ENI
+ returned: always
+ type: str
+ sample: "4415120123456"
+ private_dns_name:
+ description: Private DNS name for the ENI
+ returned: always
+ type: str
+ sample: "ip-172-16-1-180.ec2.internal"
+ private_ip_address:
+ description: Private IP address for the ENI
+ returned: always
+ type: str
+ sample: "172.16.1.180"
+ private_ip_addresses:
+ description: List of private IP addresses attached to the ENI
+ returned: always
+ type: list
+ sample: []
+ requester_id:
+ description: The ID of the entity that launched the ENI
+ returned: always
+ type: str
+ sample: "AIDAIONYVJQNIAZFT3ABC"
+ requester_managed:
+ description: Indicates whether the network interface is being managed by an AWS service.
+ returned: always
+ type: bool
+ sample: false
+ source_dest_check:
+ description: Indicates whether the network interface performs source/destination checking.
+ returned: always
+ type: bool
+ sample: false
+ status:
+ description: Indicates if the network interface is attached to an instance or not
+ returned: always
+ type: str
+ sample: "in-use"
+ subnet_id:
+ description: Subnet ID the ENI is in
+ returned: always
+ type: str
+ sample: "subnet-7bbf01234"
+ tags:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ version_added: 1.3.0
+ tag_set:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ vpc_id:
+ description: ID of the VPC the network interface is part of
+ returned: always
+ type: str
+ sample: "vpc-b3f1f123"
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_eni(connection, module):
+
+ params = {}
+ # Options are mutually exclusive
+ if module.params.get("eni_id"):
+ params['NetworkInterfaceIds'] = [module.params.get("eni_id")]
+ elif module.params.get("filters"):
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ else:
+ params['Filters'] = []
+
+ try:
+ network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces']
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+ module.exit_json(network_interfaces=[])
+ except (ClientError, NoCredentialsError) as e:
+ module.fail_json_aws(e)
+
+ # Modify boto3 tags list to be ansible friendly dict and then camel_case
+ camel_network_interfaces = []
+ for network_interface in network_interfaces_result:
+ network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+ network_interface['Tags'] = network_interface['TagSet']
+ if 'Name' in network_interface['Tags']:
+ network_interface['Name'] = network_interface['Tags']['Name']
+ # Added id to interface info to be compatible with return values of ec2_eni module:
+ network_interface['Id'] = network_interface['NetworkInterfaceId']
+ camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet']))
+
+ module.exit_json(network_interfaces=camel_network_interfaces)
+
+
+def get_eni_info(interface):
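+ # NOTE: legacy boto2-style helper; the boto3 code path above (list_eni) does
+ # not call this function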
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if hasattr(interface, 'publicDnsName'):
+ interface_info['association'] = {'public_ip_address': interface.publicIp,
+ 'public_dns_name': interface.publicDnsName,
+ 'ip_owner_id': interface.ipOwnerId
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def main():
+ argument_spec = dict(
+ eni_id=dict(type='str'),
+ filters=dict(default=None, type='dict')
+ )
+ mutually_exclusive = [
+ ['eni_id', 'filters']
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True)
+ if module._name == 'ec2_eni_facts':
+ module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
new file mode 100644
index 00000000..4741dfbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+version_added: 1.0.0
+short_description: Gather information about ec2 ENI interfaces in AWS
+description:
+ - Gather information about ec2 ENI interfaces in AWS.
+ - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements: [ boto3 ]
+options:
+ eni_id:
+ description:
+ - The ID of the ENI.
+ - This option is mutually exclusive of I(filters).
+ type: str
+ version_added: 1.3.0
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ - This option is mutually exclusive of I(eni_id).
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all ENIs
+- amazon.aws.ec2_eni_info:
+
+# Gather information about a particular ENI
+- amazon.aws.ec2_eni_info:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
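+# Gather information about a particular ENI using the eni_id option
+# (mutually exclusive with filters; the ID here is a placeholder)
+- amazon.aws.ec2_eni_info:
+ eni_id: eni-xxxxxxx
+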
+'''
+
+RETURN = '''
+network_interfaces:
+ description: List of matching elastic network interfaces
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: Info of associated elastic IP (EIP)
+ returned: When an ENI is associated with an EIP
+ type: dict
+ sample: {
+ allocation_id: "eipalloc-5sdf123",
+ association_id: "eipassoc-8sdf123",
+ ip_owner_id: "4415120123456",
+ public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
+ public_ip: "52.1.0.63"
+ }
+ attachment:
+ description: Info about attached ec2 instance
+ returned: When an ENI is attached to an ec2 instance
+ type: dict
+ sample: {
+ attach_time: "2017-08-05T15:25:47+00:00",
+ attachment_id: "eni-attach-149d21234",
+ delete_on_termination: false,
+ device_index: 1,
+ instance_id: "i-15b8d3cadbafa1234",
+ instance_owner_id: "4415120123456",
+ status: "attached"
+ }
+ availability_zone:
+ description: Availability zone of ENI
+ returned: always
+ type: str
+ sample: "us-east-1b"
+ description:
+ description: Description text for ENI
+ returned: always
+ type: str
+ sample: "My favourite network interface"
+ groups:
+ description: List of attached security groups
+ returned: always
+ type: list
+ sample: [
+ {
+ group_id: "sg-26d0f1234",
+ group_name: "my_ec2_security_group"
+ }
+ ]
+ id:
+ description: The id of the ENI (alias for network_interface_id)
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ interface_type:
+ description: Type of the network interface
+ returned: always
+ type: str
+ sample: "interface"
+ ipv6_addresses:
+ description: List of IPv6 addresses for this interface
+ returned: always
+ type: list
+ sample: []
+ mac_address:
+ description: MAC address of the network interface
+ returned: always
+ type: str
+ sample: "0a:f8:10:2f:ab:a1"
+ name:
+ description: The Name tag of the ENI, often displayed in the AWS UIs as Name
+ returned: When a Name tag has been set
+ type: str
+ version_added: 1.3.0
+ network_interface_id:
+ description: The id of the ENI
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ owner_id:
+ description: AWS account id of the owner of the ENI
+ returned: always
+ type: str
+ sample: "4415120123456"
+ private_dns_name:
+ description: Private DNS name for the ENI
+ returned: always
+ type: str
+ sample: "ip-172-16-1-180.ec2.internal"
+ private_ip_address:
+ description: Private IP address for the ENI
+ returned: always
+ type: str
+ sample: "172.16.1.180"
+ private_ip_addresses:
+ description: List of private IP addresses attached to the ENI
+ returned: always
+ type: list
+ sample: []
+ requester_id:
+ description: The ID of the entity that launched the ENI
+ returned: always
+ type: str
+ sample: "AIDAIONYVJQNIAZFT3ABC"
+ requester_managed:
+ description: Indicates whether the network interface is being managed by an AWS service.
+ returned: always
+ type: bool
+ sample: false
+ source_dest_check:
+ description: Indicates whether the network interface performs source/destination checking.
+ returned: always
+ type: bool
+ sample: false
+ status:
+ description: Indicates if the network interface is attached to an instance or not
+ returned: always
+ type: str
+ sample: "in-use"
+ subnet_id:
+ description: Subnet ID the ENI is in
+ returned: always
+ type: str
+ sample: "subnet-7bbf01234"
+ tags:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ version_added: 1.3.0
+ tag_set:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ vpc_id:
+ description: ID of the VPC the network interface is part of
+ returned: always
+ type: str
+ sample: "vpc-b3f1f123"
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import NoCredentialsError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_eni(connection, module):
+
+ params = {}
+ # Options are mutually exclusive
+ if module.params.get("eni_id"):
+ params['NetworkInterfaceIds'] = [module.params.get("eni_id")]
+ elif module.params.get("filters"):
+ params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ else:
+ params['Filters'] = []
+
+ try:
+ network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces']
+ except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):
+ module.exit_json(network_interfaces=[])
+ except (ClientError, NoCredentialsError) as e:
+ module.fail_json_aws(e)
+
+ # Modify boto3 tags list to be ansible friendly dict and then camel_case
+ camel_network_interfaces = []
+ for network_interface in network_interfaces_result:
+ network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+ network_interface['Tags'] = network_interface['TagSet']
+ if 'Name' in network_interface['Tags']:
+ network_interface['Name'] = network_interface['Tags']['Name']
+ # Added id to interface info to be compatible with return values of ec2_eni module:
+ network_interface['Id'] = network_interface['NetworkInterfaceId']
+ camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet']))
+
+ module.exit_json(network_interfaces=camel_network_interfaces)
+
+
+def get_eni_info(interface):
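+ # NOTE: legacy boto2-style helper; the boto3 code path above (list_eni) does
+ # not call this function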
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if hasattr(interface, 'publicDnsName'):
+ interface_info['association'] = {'public_ip_address': interface.publicIp,
+ 'public_dns_name': interface.publicDnsName,
+ 'ip_owner_id': interface.ipOwnerId
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def main():
+ argument_spec = dict(
+ eni_id=dict(type='str'),
+ filters=dict(default=None, type='dict')
+ )
+ mutually_exclusive = [
+ ['eni_id', 'filters']
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True)
+ if module._name == 'ec2_eni_facts':
+ module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py
new file mode 100644
index 00000000..2338aa69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group.py
@@ -0,0 +1,1380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+version_added: 1.0.0
+author: "Andrew de Quincey (@adq)"
+requirements: [ boto3 ]
+short_description: Maintain an EC2 VPC security group
+description:
+ - Maintains EC2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ type: str
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. The rules list may reference the group's own name in C(group_name);
+ this allows idempotent loopback additions (e.g. allowing the group to access itself).
+ Rule sources list support was added in version 2.4, allowing multiple sources per
+ source type as well as multiple source types per rule. Prior to 2.4 only a single source per rule was allowed.
+ In version 2.5 support for rule descriptions was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Egress rule sources list support was added in version 2.4. In version 2.5 support for rule descriptions
+ was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is going to.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ state:
+ description:
+ - Create or delete a security group.
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ type: str
+ purge_rules:
+ description:
+ - Purge existing rules on security group that are not found in rules.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ purge_rules_egress:
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ tags:
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+ - Support for diff mode was added in version 2.7.
+'''
+
+EXAMPLES = '''
+- name: example using security group rule descriptions
+ amazon.aws.ec2_group:
+ name: "{{ name }}"
+ description: sg with rule descriptions
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ rule_desc: allow all on port 80
+
+- name: example ec2 group
+ amazon.aws.ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ # this should only be needed for EC2 Classic security group rules
+ # because in a VPC an ELB will use a user-account security group
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ - proto: all
+ # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
+ # traffic on all ports is allowed, regardless of any ports you specify
+ from_port: 10050 # this value is ignored
+ to_port: 10050 # this value is ignored
+ cidr_ip: 10.0.0.0/8
+
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ amazon.aws.ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+ diff: True
+
+- name: "Delete group by its id"
+ amazon.aws.ec2_group:
+ region: eu-west-1
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: str
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: str
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: str
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: str
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "198.51.100.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+import itertools
+from copy import deepcopy
+from time import sleep
+from collections import namedtuple
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.network import to_ipv6_subnet
+from ansible.module_utils.common.network import to_subnet
+from ansible.module_utils.six import string_types
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import IPv6Network
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import ip_network
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.iam import get_aws_account_id
+from ..module_utils.waiters import get_waiter
+
+
+Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
+valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
+current_account_id = None
+
+
+def rule_cmp(a, b):
+ """Compare rules without descriptions"""
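+    # e.g. descriptions are ignored and, for matching protocols, port ranges
+    # (None, None) and (-1, -1) are treated as equivalent, so these compare equal:
+    #   rule_cmp(Rule((None, None), '-1', '10.0.0.0/8', 'ipv4', None),
+    #            Rule((-1, -1), '-1', '10.0.0.0/8', 'ipv4', 'allow all'))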
+ for prop in ['port_range', 'protocol', 'target', 'target_type']:
+ if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
+ # equal protocols can interchange `(-1, -1)` and `(None, None)`
+ if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
+ continue
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ return True
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
+def to_permission(rule):
+ # take a Rule, output the serialized grant
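+    # e.g. Rule((80, 80), 'tcp', '10.0.0.0/8', 'ipv4', 'web') serializes to
+    #   {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80,
+    #    'IpRanges': [{'CidrIp': '10.0.0.0/8', 'Description': 'web'}]}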
+ perm = {
+ 'IpProtocol': rule.protocol,
+ }
+ perm['FromPort'], perm['ToPort'] = rule.port_range
+ if rule.target_type == 'ipv4':
+ perm['IpRanges'] = [{
+ 'CidrIp': rule.target,
+ }]
+ if rule.description:
+ perm['IpRanges'][0]['Description'] = rule.description
+ elif rule.target_type == 'ipv6':
+ perm['Ipv6Ranges'] = [{
+ 'CidrIpv6': rule.target,
+ }]
+ if rule.description:
+ perm['Ipv6Ranges'][0]['Description'] = rule.description
+ elif rule.target_type == 'group':
+ if isinstance(rule.target, tuple):
+ pair = {}
+ if rule.target[0]:
+ pair['UserId'] = rule.target[0]
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if rule.target[1]:
+ pair['GroupId'] = rule.target[1]
+ elif rule.target[2]:
+ pair['GroupName'] = rule.target[2]
+ perm['UserIdGroupPairs'] = [pair]
+ else:
+ perm['UserIdGroupPairs'] = [{
+ 'GroupId': rule.target
+ }]
+ if rule.description:
+ perm['UserIdGroupPairs'][0]['Description'] = rule.description
+ elif rule.target_type == 'ip_prefix':
+ perm['PrefixListIds'] = [{
+ 'PrefixListId': rule.target,
+ }]
+ if rule.description:
+ perm['PrefixListIds'][0]['Description'] = rule.description
+ elif rule.target_type not in valid_targets:
+ raise ValueError('Invalid target type for rule {0}'.format(rule))
+ return fix_port_and_protocol(perm)
+
+
+def rule_from_group_permission(perm):
+ """
+ Returns a rule dict from an existing security group.
+
+    When using a security group as a target, all 3 fields (OwnerId, GroupId, and
+ GroupName) need to exist in the target. This ensures consistency of the
+ values that will be compared to desired_ingress or desired_egress
+ in wait_for_rule_propagation().
+ GroupId is preferred as it is more specific except when targeting 'amazon-'
+ prefixed security groups (such as EC2 Classic ELBs).
+ """
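+    # Note: a single permission may yield several Rule tuples, e.g. one per
+    # entry in 'IpRanges', and each 'UserIdGroupPairs' entry yields a 'group'
+    # Rule whose target is an (owner_id, group_id, group_name) tuple.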
+ def ports_from_permission(p):
+ if 'FromPort' not in p and 'ToPort' not in p:
+ return (None, None)
+        return (int(p['FromPort']), int(p['ToPort']))
+
+ # outputs a rule tuple
+ for target_key, target_subkey, target_type in [
+ ('IpRanges', 'CidrIp', 'ipv4'),
+ ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
+ ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
+ ]:
+ if target_key not in perm:
+ continue
+ for r in perm[target_key]:
+ # there may be several IP ranges here, which is ok
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ r[target_subkey],
+ target_type,
+ r.get('Description')
+ )
+ if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
+ for pair in perm['UserIdGroupPairs']:
+ target = (
+ pair.get('UserId', current_account_id),
+ pair.get('GroupId', None),
+ None,
+ )
+ if pair.get('UserId', '').startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need
+ # group-id specified, so remove it when querying
+ # from permission
+ target = (
+ pair.get('UserId', None),
+ None,
+ pair.get('GroupName', None),
+ )
+ elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id:
+ # EC2-Classic cross-account
+ pass
+ elif 'VpcPeeringConnectionId' in pair:
+ # EC2-VPC cross-account VPC peering
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ None,
+ )
+
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ target,
+ 'group',
+ pair.get('Description')
+ )
+
+
+# Wrap just this method so we can retry on missing groups
+@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound'])
+def get_security_groups_with_backoff(client, **kwargs):
+ return client.describe_security_groups(**kwargs)
+
+
+def sg_exists_with_backoff(client, **kwargs):
+ try:
+ return client.describe_security_groups(aws_retry=True, **kwargs)
+ except is_boto3_error_code('InvalidGroup.NotFound'):
+ return {'SecurityGroups': []}
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
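+    # Uniqueness is keyed on the JSON serialization of each rule dict, so two
+    # textually identical rule dicts (e.g. the same tcp/22 rule listed twice)
+    # collapse into a single entry.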
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port', 'rule_desc')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+    AWS accepts an IP range or a security group as the target of a rule. This
+    function validates the rule specification and returns either a non-None
+    group_id or a non-None IP range.
+
+    When using a security group as a target, all 3 fields (OwnerId, GroupId, and
+ GroupName) need to exist in the target. This ensures consistency of the
+ values that will be compared to current_rules (from current_ingress and
+ current_egress) in wait_for_rule_propagation().
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
+ owner_id = current_account_id
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+        # this is a foreign Security Group. Since it can't be fetched, create a local representation of it
+ # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg,
+ # and peer-VPC groups like 0987654321/sg-1234567890/example
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ if group_id and group_name:
+ if group_name.startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need group_id specified,
+ group_id = None
+ else:
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', (owner_id, rule['group_id'], None), False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ # retry describing the group once
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
+ module.fail_json(msg="group %s will be automatically created by rule %s but "
+ "no description was provided" % (group_name, rule))
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ elif not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ auto_group = client.create_security_group(aws_retry=True, **params)
+ get_waiter(
+ client, 'security_group_exists',
+ ).wait(
+ GroupIds=[auto_group['GroupId']],
+ )
+ except is_boto3_error_code('InvalidGroup.Duplicate'):
+ # The group exists, but didn't show up in any of our describe-security-groups calls
+ # Try searching on a filter for the name, and allow a retry window for AWS to update
+ # the model on their end.
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except IndexError as e:
+ module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ except ClientError as e:
+ module.fail_json_aws(
+ e,
+ msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ if auto_group is not None:
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ return 'group', (owner_id, group_id, None), target_group_created
+ elif 'cidr_ip' in rule:
+ return 'ipv4', validate_ip(module, rule['cidr_ip']), False
+ elif 'cidr_ipv6' in rule:
+ return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
+ elif 'ip_prefix' in rule:
+ return 'ip_prefix', rule['ip_prefix'], False
+
+ module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
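+    # e.g. ports_expand([22, '80-83', '443']) -> [(22, 22), (80, 83), (443, 443)]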
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, string_types):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((int(port.strip()),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
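+    # e.g. {'proto': 'tcp', 'ports': [22, '8000-8100'], 'cidr_ip': '10.0.0.0/8'}
+    # expands to two rules: one with from_port/to_port 22/22, one with 8000/8100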
+ if 'ports' not in rule:
+ if isinstance(rule.get('from_port'), string_types):
+ rule['from_port'] = int(rule.get('from_port'))
+ if isinstance(rule.get('to_port'), string_types):
+ rule['to_port'] = int(rule.get('to_port'))
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
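+    # e.g. for source_type 'cidr_ip', {'proto': 'tcp', 'cidr_ip': ['10.0.0.0/8',
+    # '172.16.0.0/12']} expands to one rule per CIDR; any other source keys
+    # present on the rule are dropped from the copies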
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
+def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+ # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
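+        # e.g. '10.1.2.3/8' has host bits set, so a warning is emitted and the
+        # network '10.0.0.0/8' is returned instead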
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+                # parsing validates the network; ip_network() raises a ValueError
+                # when host bits are set
+                ip_network(to_text(cidr_ip))
+ ip = cidr_ip
+ except ValueError:
+ # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
+
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed |= True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed |= True
+ return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(aws_retry=True, **params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+        # When a group is created, an egress rule allowing ALL traffic
+        # to 0.0.0.0/0 is added automatically, but it's not
+        # reflected in the object returned by the AWS API
+        # call. We re-read the group to get an updated object;
+        # AWS sometimes takes a couple of seconds to update the security group, so wait until it exists
+ while True:
+ sleep(3)
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+ return group
+ return None
+
+
+def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+ group_id = group['GroupId']
+ tries = 6
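+    # i.e. each direction is polled up to 6 times with a 10 second sleep between
+    # attempts, giving rules roughly a minute to propagate before warning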
+
+ def await_rules(group, desired_rules, purge, rule_key):
+ for i in range(tries):
+ current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+ if purge and len(current_rules ^ set(desired_rules)) == 0:
+ return group
+ elif purge:
+ conflicts = current_rules ^ set(desired_rules)
+ # For cases where set comparison is equivalent, but invalid port/proto exist
+ for a, b in itertools.combinations(conflicts, 2):
+ if rule_cmp(a, b):
+ conflicts.discard(a)
+ conflicts.discard(b)
+ if not len(conflicts):
+ return group
+ elif current_rules.issuperset(desired_rules) and not purge:
+ return group
+ sleep(10)
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+ return group
+
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ if 'VpcId' in group and module.params.get('rules_egress') is not None:
+ group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+ return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+ params = {'Filters': []}
+ if group_id:
+ params['GroupIds'] = [group_id]
+ if name:
+ # Add name to filters rather than params['GroupNames']
+ # because params['GroupNames'] only checks the default vpc if no vpc is provided
+ params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+ if vpc_id:
+ params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ # Don't filter by description to maintain backwards compatibility
+
+ try:
+ security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+ all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+ if security_groups:
+ groups = dict((group['GroupId'], group) for group in all_groups)
+ groups.update(dict((group['GroupName'], group) for group in all_groups))
+ if vpc_id:
+ vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+ groups.update(vpc_wins)
+ # maintain backwards compatibility by using the last matching group
+ return security_groups[-1], groups
+ return None, {}
+
+
+def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
+ if not hasattr(client, "update_security_group_rule_descriptions_egress"):
+        all_rules = (rules or []) + (rules_egress or [])
+ if any('rule_desc' in rule for rule in all_rules):
+ module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
+def get_diff_final_resource(client, module, security_group):
+ def get_account_id(security_group, module):
+ try:
+ owner_id = security_group.get('owner_id', current_account_id)
+ except (BotoCoreError, ClientError) as e:
+ owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
+ return owner_id
+
+ def get_final_tags(security_group_tags, specified_tags, purge_tags):
+ if specified_tags is None:
+ return security_group_tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
+ end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
+ end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
+ end_result_tags.update(tags_need_modify)
+ return end_result_tags
+
+ def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
+ if specified_rules is None:
+ return security_group_rules
+ if purge_rules:
+ final_rules = []
+ else:
+ final_rules = list(security_group_rules)
+ specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
+ for rule in specified_rules:
+ format_rule = {
+ 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
+ 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
+ }
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ format_rule['ip_protocol'] = '-1'
+ format_rule.pop('from_port')
+ format_rule.pop('to_port')
+ elif rule.get('ports'):
+ if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
+ rule['ports'] = [rule['ports']]
+ for port in rule.get('ports'):
+ if isinstance(port, string_types) and '-' in port:
+ format_rule['from_port'], format_rule['to_port'] = port.split('-')
+ else:
+ format_rule['from_port'] = format_rule['to_port'] = port
+ elif rule.get('from_port') or rule.get('to_port'):
+ format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
+ format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
+ for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
+ if rule.get(source_type):
+ rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
+ if rule.get('rule_desc'):
+ format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
+ else:
+ if not isinstance(rule[source_type], list):
+ rule[source_type] = [rule[source_type]]
+ format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
+ if rule.get('group_id') or rule.get('group_name'):
+ rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
+ format_rule['user_id_group_pairs'] = [{
+ 'description': rule_sg.get('description', rule_sg.get('group_desc')),
+ 'group_id': rule_sg.get('group_id', rule.get('group_id')),
+ 'group_name': rule_sg.get('group_name', rule.get('group_name')),
+ 'peering_status': rule_sg.get('peering_status'),
+ 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
+ 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
+ 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
+ }]
+ for k, v in list(format_rule['user_id_group_pairs'][0].items()):
+ if v is None:
+ format_rule['user_id_group_pairs'][0].pop(k)
+ final_rules.append(format_rule)
+ # Order final rules consistently
+ final_rules.sort(key=get_ip_permissions_sort_key)
+ return final_rules
+ security_group_ingress = security_group.get('ip_permissions', [])
+ specified_ingress = module.params['rules']
+ purge_ingress = module.params['purge_rules']
+ security_group_egress = security_group.get('ip_permissions_egress', [])
+ specified_egress = module.params['rules_egress']
+ purge_egress = module.params['purge_rules_egress']
+ return {
+ 'description': module.params['description'],
+ 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
+ 'group_name': security_group.get('group_name', module.params['name']),
+ 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
+ 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
+ 'owner_id': get_account_id(security_group, module),
+ 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
+ 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
+
+
+def flatten_nested_targets(module, rules):
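+    # e.g. cidr_ip: [['10.0.0.0/8', '172.16.0.0/12'], '192.168.0.0/16'] is
+    # flattened to cidr_ip: ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']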
+ def _flatten(targets):
+ for target in targets:
+ if isinstance(target, list):
+ for t in _flatten(target):
+ yield t
+ elif isinstance(target, string_types):
+ yield target
+
+ if rules is not None:
+ for rule in rules:
+ target_list_type = None
+ if isinstance(rule.get('cidr_ip'), list):
+ target_list_type = 'cidr_ip'
+ elif isinstance(rule.get('cidr_ipv6'), list):
+ target_list_type = 'cidr_ipv6'
+ if target_list_type is not None:
+ rule[target_list_type] = list(_flatten(rule[target_list_type]))
+ return rules
+
+
+def get_rule_sort_key(dicts):
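+    # returns the first identifying value found on a rule's target dict, e.g.
+    # {'cidr_ip': '10.0.0.0/8', 'description': 'x'} sorts by '10.0.0.0/8'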
+ if dicts.get('cidr_ip'):
+ return dicts.get('cidr_ip')
+ elif dicts.get('cidr_ipv6'):
+ return dicts.get('cidr_ipv6')
+ elif dicts.get('prefix_list_id'):
+ return dicts.get('prefix_list_id')
+ elif dicts.get('group_id'):
+ return dicts.get('group_id')
+ return None
+
+
+def get_ip_permissions_sort_key(rule):
+ if rule.get('ip_ranges'):
+ rule.get('ip_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ip_ranges')[0]['cidr_ip']
+ elif rule.get('ipv6_ranges'):
+ rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ipv6_ranges')[0]['cidr_ipv6']
+ elif rule.get('prefix_list_ids'):
+ rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
+ return rule.get('prefix_list_ids')[0]['prefix_list_id']
+ elif rule.get('user_id_group_pairs'):
+ rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
+ return rule.get('user_id_group_pairs')[0]['group_id']
+ return None
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list', elements='dict'),
+ rules_egress=dict(type='list', elements='dict'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
+ rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ client = module.client('ec2', AWSRetry.jittered_backoff())
+
+ verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
+ group, groups = group_exists(client, module, vpc_id, group_id, name)
+ group_created_new = not bool(group)
+
+ global current_account_id
+ current_account_id = get_aws_account_id(module)
+
+ before = {}
+ after = {}
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ try:
+ if not module.check_mode:
+ client.delete_security_group(aws_retry=True, GroupId=group['GroupId'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+            else:
+                current_egress.append(rule)
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+            if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+ # named_tuple_ingress_list and named_tuple_egress_list get updated by
+ # method update_rule_descriptions, deep copy these two lists to new
+ # variables for the record of the 'desired' ingress and egress sg permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress,
+ named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+ rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress)
+
+        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+            # A new group with no rules provided has already been waited on:
+            # create_security_group() polls until AWS adds the default egress rule
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+            # keep polling until the current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py
new file mode 100644
index 00000000..228b82d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_facts.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_group_info
+version_added: 1.0.0
+short_description: Gather information about ec2 security groups in AWS.
+description:
+ - Gather information about ec2 security groups in AWS.
+ - This module was called C(amazon.aws.ec2_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author:
+- Henrique Rodrigues (@Sodki)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores (_)
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all security groups. To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_group_info:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_group_info:
+ filters:
+ group-name: example-1
+
+# Gather information about a security group by id
+- amazon.aws.ec2_group_info:
+ filters:
+ group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_group_info:
+ filters:
+ group_id: sg-12345678
+ vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_group_info:
+ filters:
+ group-name:
+ - example-1
+ - example-2
+ - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around "tag:Name" are important because of the colon in the key
+- amazon.aws.ec2_group_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+ description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+ type: list
+ returned: always
+ sample:
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_group_facts':
+ module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
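+    # e.g. {'group_id': 'sg-12345678', 'tag:internal_name': 'x'} becomes
+    # {'group-id': 'sg-12345678', 'tag:internal_name': 'x'}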
+ filters = module.params.get("filters")
+ sanitized_filters = dict()
+
+ for key in filters:
+ if key.startswith("tag:"):
+ sanitized_filters[key] = filters[key]
+ else:
+ sanitized_filters[key.replace("_", "-")] = filters[key]
+
+ try:
+ security_groups = connection.describe_security_groups(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe security groups')
+
+ snaked_security_groups = []
+ for security_group in security_groups['SecurityGroups']:
+ # Modify boto3 tags list to be ansible friendly dict
+ # but don't camel case tags
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
+ snaked_security_groups.append(security_group)
+
+ module.exit_json(security_groups=snaked_security_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py
new file mode 100644
index 00000000..228b82d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_group_info.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_group_info
+version_added: 1.0.0
+short_description: Gather information about ec2 security groups in AWS.
+description:
+ - Gather information about ec2 security groups in AWS.
+ - This module was called C(amazon.aws.ec2_group_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author:
+- Henrique Rodrigues (@Sodki)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores (_)
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all security groups. To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_group_info:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_group_info:
+ filters:
+ group-name: example-1
+
+# Gather information about a security group by id
+- amazon.aws.ec2_group_info:
+ filters:
+ group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_group_info:
+ filters:
+ group_id: sg-12345678
+ vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_group_info:
+ filters:
+ group-name:
+ - example-1
+ - example-2
+ - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around "tag:Name" are important because of the colon in the key
+- amazon.aws.ec2_group_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+ description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+ type: list
+ returned: always
+ sample:
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_group_facts':
+ module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
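+    # e.g. {'group_id': 'sg-12345678', 'tag:internal_name': 'x'} becomes
+    # {'group-id': 'sg-12345678', 'tag:internal_name': 'x'}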
+ filters = module.params.get("filters")
+ sanitized_filters = dict()
+
+ for key in filters:
+ if key.startswith("tag:"):
+ sanitized_filters[key] = filters[key]
+ else:
+ sanitized_filters[key.replace("_", "-")] = filters[key]
+
+ try:
+ security_groups = connection.describe_security_groups(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to describe security groups')
+
+ snaked_security_groups = []
+ for security_group in security_groups['SecurityGroups']:
+ # Modify boto3 tags list to be ansible friendly dict
+ # but don't camel case tags
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
+ snaked_security_groups.append(security_group)
+
+ module.exit_json(security_groups=snaked_security_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
new file mode 100644
index 00000000..815130f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_key.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_key
+version_added: 1.0.0
+short_description: create or delete an ec2 key pair
+description:
+ - create or delete an ec2 key pair.
+options:
+ name:
+ description:
+ - Name of the key pair.
+ required: true
+ type: str
+ key_material:
+ description:
+ - Public key material.
+ required: false
+ type: str
+ force:
+ description:
+      - Force overwrite of an already existing key pair if the key has changed.
+ required: false
+ default: true
+ type: bool
+ state:
+ description:
+ - create or delete keypair
+ required: false
+ choices: [ present, absent ]
+ default: 'present'
+ type: str
+ wait:
+ description:
+ - This option has no effect since version 2.5 and will be removed after 2022-06-01.
+ type: bool
+ wait_timeout:
+ description:
+ - This option has no effect since version 2.5 and will be removed after 2022-06-01.
+ type: int
+ required: false
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3 ]
+author:
+ - "Vincent Viallet (@zbal)"
+ - "Prasad Katti (@prasadkatti)"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a new ec2 key pair, returns generated private key
+ amazon.aws.ec2_key:
+ name: my_keypair
+
+- name: create key pair using provided key_material
+ amazon.aws.ec2_key:
+ name: my_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+
+- name: create key pair using key_material obtained using 'file' lookup plugin
+ amazon.aws.ec2_key:
+ name: my_keypair
+ key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
+
+# try creating a key pair with the name of an already existing keypair
+# but don't overwrite it even if the key is different (force=false)
+- name: try creating a key pair with the name of an already existing keypair
+ amazon.aws.ec2_key:
+ name: my_existing_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+ force: false
+
+- name: remove key pair by name
+ amazon.aws.ec2_key:
+ name: my_keypair
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: whether a keypair was created/deleted
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: short message describing the action taken
+ returned: always
+ type: str
+ sample: key pair created
+key:
+ description: details of the keypair (this is set to null when state is absent)
+ returned: always
+ type: complex
+ contains:
+ fingerprint:
+ description: fingerprint of the key
+ returned: when state is present
+ type: str
+ sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
+ name:
+ description: name of the keypair
+ returned: when state is present
+ type: str
+ sample: my_keypair
+ private_key:
+ description: private key of a newly created keypair
+ returned: when a new keypair is created by AWS (key_material is not provided)
+ type: str
+ sample: '-----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKC...
+ -----END RSA PRIVATE KEY-----'
+'''
+
+import uuid
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_bytes
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+
+
+def extract_key_data(key):
+
+ data = {
+ 'name': key['KeyName'],
+ 'fingerprint': key['KeyFingerprint']
+ }
+ if 'KeyMaterial' in key:
+ data['private_key'] = key['KeyMaterial']
+ return data
+
+
+def get_key_fingerprint(module, ec2_client, key_material):
+ '''
+ EC2's fingerprints are non-trivial to generate, so push this key
+ to a temporary name and make ec2 calculate the fingerprint for us.
+ http://blog.jbrowne.com/?p=23
+ https://forums.aws.amazon.com/thread.jspa?messageID=352828
+ '''
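+    # i.e. import the key under a throwaway "ansible-<uuid>" name, read back the
+    # fingerprint AWS computed for it, then delete the temporary key pair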
+
+ # find an unused name
+ name_in_use = True
+ while name_in_use:
+ random_name = "ansible-" + str(uuid.uuid4())
+ name_in_use = find_key_pair(module, ec2_client, random_name)
+
+ temp_key = import_key_pair(module, ec2_client, random_name, key_material)
+ delete_key_pair(module, ec2_client, random_name, finish_task=False)
+ return temp_key['KeyFingerprint']
+
+
+def find_key_pair(module, ec2_client, name):
+
+ try:
+ key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name])['KeyPairs'][0]
+ except is_boto3_error_code('InvalidKeyPair.NotFound'):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error finding keypair")
+ except IndexError:
+ key = None
+ return key
+
+
+def create_key_pair(module, ec2_client, name, key_material, force):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if key_material and force:
+ if not module.check_mode:
+ new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
+ if key['KeyFingerprint'] != new_fingerprint:
+ delete_key_pair(module, ec2_client, name, finish_task=False)
+ key = import_key_pair(module, ec2_client, name, key_material)
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair updated")
+ else:
+ # Assume a change will be made in check mode since a comparison can't be done
+ module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=False, key=key_data, msg="key pair already exists")
+ else:
+ # key doesn't exist, create it now
+ key_data = None
+ if not module.check_mode:
+ if key_material:
+ key = import_key_pair(module, ec2_client, name, key_material)
+ else:
+ try:
+ key = ec2_client.create_key_pair(aws_retry=True, KeyName=name)
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error creating key")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair created")
+
+
+def import_key_pair(module, ec2_client, name, key_material):
+
+ try:
+ key = ec2_client.import_key_pair(aws_retry=True, KeyName=name, PublicKeyMaterial=to_bytes(key_material))
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error importing key")
+ return key
+
+
+def delete_key_pair(module, ec2_client, name, finish_task=True):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if not module.check_mode:
+ try:
+ ec2_client.delete_key_pair(aws_retry=True, KeyName=name)
+ except botocore.exceptions.ClientError as err:
+ module.fail_json_aws(err, msg="error deleting key")
+ if not finish_task:
+ return
+ module.exit_json(changed=True, key=None, msg="key deleted")
+ module.exit_json(key=None, msg="key did not exist")
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ key_material=dict(),
+ force=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', removed_at_date='2022-06-01', removed_from_collection='amazon.aws'),
+ wait_timeout=dict(type='int', removed_at_date='2022-06-01', removed_from_collection='amazon.aws')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ name = module.params['name']
+ state = module.params.get('state')
+ key_material = module.params.get('key_material')
+ force = module.params.get('force')
+
+ if state == 'absent':
+ delete_key_pair(module, ec2_client, name)
+ elif state == 'present':
+ create_key_pair(module, ec2_client, name, key_material, force)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
new file mode 100644
index 00000000..e871f2d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_metadata_facts
+version_added: 1.0.0
+short_description: Gathers facts (instance metadata) about remote hosts within ec2
+author:
+ - Silviu Dicu (@silviud)
+ - Vinay Dandekar (@roadmapper)
+description:
+ - This module fetches data from the instance metadata endpoint in ec2 as per
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
+ - The module must be called from within the EC2 instance itself.
+notes:
+ - Parameters to filter on ec2_metadata_facts may be added later.
+'''
+
+EXAMPLES = '''
+# Gather EC2 metadata facts
+- amazon.aws.ec2_metadata_facts:
+
+- debug:
+ msg: "This instance is a t1.micro"
+ when: ansible_ec2_instance_type == "t1.micro"
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary of new facts representing discovered properties of the EC2 instance.
+ returned: changed
+ type: complex
+ contains:
+ ansible_ec2_ami_id:
+ description: The AMI ID used to launch the instance.
+ type: str
+ sample: "ami-XXXXXXXX"
+ ansible_ec2_ami_launch_index:
+ description:
+ - If you started more than one instance at the same time, this value indicates the order in which the instance was launched.
+ - The value of the first instance launched is 0.
+ type: str
+ sample: "0"
+ ansible_ec2_ami_manifest_path:
+ description:
+ - The path to the AMI manifest file in Amazon S3.
+ - If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown.
+ type: str
+ sample: "(unknown)"
+ ansible_ec2_ancestor_ami_ids:
+ description:
+ - The AMI IDs of any instances that were rebundled to create this AMI.
+ - This value will only exist if the AMI manifest file contained an ancestor-amis key.
+ type: str
+ sample: "(unknown)"
+ ansible_ec2_block_device_mapping_ami:
+ description: The virtual device that contains the root/boot file system.
+ type: str
+ sample: "/dev/sda1"
+ ansible_ec2_block_device_mapping_ebsN:
+ description:
+ - The virtual devices associated with Amazon EBS volumes, if any are present.
+ - Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started.
+ - The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2).
+ type: str
+ sample: "/dev/xvdb"
+ ansible_ec2_block_device_mapping_ephemeralN:
+ description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume.
+ type: str
+ sample: "/dev/xvdc"
+ ansible_ec2_block_device_mapping_root:
+ description:
+ - The virtual devices or partitions associated with the root devices, or partitions on the virtual device,
+ where the root (/ or C) file system is associated with the given instance.
+ type: str
+ sample: "/dev/sda1"
+ ansible_ec2_block_device_mapping_swap:
+ description: The virtual devices associated with swap. Not always present.
+ type: str
+ sample: "/dev/sda2"
+ ansible_ec2_fws_instance_monitoring:
+ description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch."
+ type: str
+ sample: "enabled"
+ ansible_ec2_hostname:
+ description:
+ - The private IPv4 DNS hostname of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "ip-10-0-0-1.ec2.internal"
+ ansible_ec2_iam_info:
+ description:
+ - If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated,
+ including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present.
+ type: complex
+ sample: ""
+ contains:
+ LastUpdated:
+                    description: The last time the InstanceProfile associated with the Instance was changed.
+ type: str
+ InstanceProfileArn:
+ description: The ARN of the InstanceProfile associated with the Instance.
+ type: str
+ InstanceProfileId:
+ description: The Id of the InstanceProfile associated with the Instance.
+ type: str
+ ansible_ec2_iam_info_instanceprofilearn:
+ description: The IAM instance profile ARN.
+ type: str
+ sample: "arn:aws:iam::<account id>:instance-profile/<role name>"
+ ansible_ec2_iam_info_instanceprofileid:
+ description: IAM instance profile ID.
+ type: str
+ sample: ""
+ ansible_ec2_iam_info_lastupdated:
+ description: IAM info last updated time.
+ type: str
+ sample: "2017-05-12T02:42:27Z"
+ ansible_ec2_iam_instance_profile_role:
+ description: IAM instance role.
+ type: str
+ sample: "role_name"
+ ansible_ec2_iam_security_credentials_<role name>:
+ description:
+            - If there is an IAM role associated with the instance, <role name> is the name of the role,
+              and the value contains the temporary security credentials associated with that role. Otherwise, not present.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_<role name>_accesskeyid:
+ description: IAM role access key ID.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_<role name>_code:
+ description: IAM code.
+ type: str
+ sample: "Success"
+ ansible_ec2_iam_security_credentials_<role name>_expiration:
+ description: IAM role credentials expiration time.
+ type: str
+ sample: "2017-05-12T09:11:41Z"
+ ansible_ec2_iam_security_credentials_<role name>_lastupdated:
+ description: IAM role last updated time.
+ type: str
+ sample: "2017-05-12T02:40:44Z"
+ ansible_ec2_iam_security_credentials_<role name>_secretaccesskey:
+ description: IAM role secret access key.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_<role name>_token:
+ description: IAM role token.
+ type: str
+ sample: ""
+ ansible_ec2_iam_security_credentials_<role name>_type:
+ description: IAM role type.
+ type: str
+ sample: "AWS-HMAC"
+ ansible_ec2_instance_action:
+ description: Notifies the instance that it should reboot in preparation for bundling.
+ type: str
+ sample: "none"
+ ansible_ec2_instance_id:
+ description: The ID of this instance.
+ type: str
+ sample: "i-XXXXXXXXXXXXXXXXX"
+ ansible_ec2_instance_identity_document:
+ description: JSON containing instance attributes, such as instance-id, private IP address, etc.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_accountid:
+ description: ""
+ type: str
+ sample: "012345678901"
+ ansible_ec2_instance_identity_document_architecture:
+ description: Instance system architecture.
+ type: str
+ sample: "x86_64"
+ ansible_ec2_instance_identity_document_availabilityzone:
+ description: The Availability Zone in which the instance launched.
+ type: str
+ sample: "us-east-1a"
+ ansible_ec2_instance_identity_document_billingproducts:
+ description: Billing products for this instance.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_devpayproductcodes:
+ description: Product codes for the launched AMI.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_imageid:
+ description: The AMI ID used to launch the instance.
+ type: str
+ sample: "ami-01234567"
+ ansible_ec2_instance_identity_document_instanceid:
+ description: The ID of this instance.
+ type: str
+ sample: "i-0123456789abcdef0"
+ ansible_ec2_instance_identity_document_instancetype:
+ description: The type of instance.
+ type: str
+ sample: "m4.large"
+ ansible_ec2_instance_identity_document_kernelid:
+ description: The ID of the kernel launched with this instance, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_pendingtime:
+ description: The instance pending time.
+ type: str
+ sample: "2017-05-11T20:51:20Z"
+ ansible_ec2_instance_identity_document_privateip:
+ description:
+ - The private IPv4 address of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "10.0.0.1"
+ ansible_ec2_instance_identity_document_ramdiskid:
+ description: The ID of the RAM disk specified at launch time, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_document_region:
+ description: The Region in which the instance launched.
+ type: str
+ sample: "us-east-1"
+ ansible_ec2_instance_identity_document_version:
+ description: Identity document version.
+ type: str
+ sample: "2010-08-31"
+ ansible_ec2_instance_identity_pkcs7:
+ description: Used to verify the document's authenticity and content against the signature.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_rsa2048:
+ description: Used to verify the document's authenticity and content against the signature.
+ type: str
+ sample: ""
+ ansible_ec2_instance_identity_signature:
+ description: Data that can be used by other parties to verify its origin and authenticity.
+ type: str
+ sample: ""
+ ansible_ec2_instance_life_cycle:
+ description: The purchasing option of the instance.
+ type: str
+ sample: "on-demand"
+ ansible_ec2_instance_type:
+ description: The type of the instance.
+ type: str
+ sample: "m4.large"
+ ansible_ec2_local_hostname:
+ description:
+ - The private IPv4 DNS hostname of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "ip-10-0-0-1.ec2.internal"
+ ansible_ec2_local_ipv4:
+ description:
+ - The private IPv4 address of the instance.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "10.0.0.1"
+ ansible_ec2_mac:
+ description:
+ - The instance's media access control (MAC) address.
+ - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
+ type: str
+ sample: "00:11:22:33:44:55"
+ ansible_ec2_metrics_vhostmd:
+ description: Metrics.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_device_number:
+ description:
+ - The unique device number associated with that interface. The device number corresponds to the device name;
+ for example, a device-number of 2 is for the eth2 device.
+ - This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI.
+ type: str
+ sample: "0"
+ ansible_ec2_network_interfaces_macs_<mac address>_interface_id:
+ description: The elastic network interface ID.
+ type: str
+ sample: "eni-12345678"
+ ansible_ec2_network_interfaces_macs_<mac address>_ipv4_associations_<ip address>:
+ description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_ipv6s:
+ description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_local_hostname:
+ description: The interface's local hostname.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_local_ipv4s:
+ description: The private IPv4 addresses associated with the interface.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_mac:
+ description: The instance's MAC address.
+ type: str
+ sample: "00:11:22:33:44:55"
+ ansible_ec2_network_interfaces_macs_<mac address>_owner_id:
+ description:
+ - The ID of the owner of the network interface.
+ - In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing.
+ - Traffic on an interface is always billed to the interface owner.
+ type: str
+ sample: "01234567890"
+ ansible_ec2_network_interfaces_macs_<mac address>_public_hostname:
+ description:
+ - The interface's public DNS (IPv4). If the instance is in a VPC,
+ this category is only returned if the enableDnsHostnames attribute is set to true.
+ type: str
+ sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
+ ansible_ec2_network_interfaces_macs_<mac address>_public_ipv4s:
+ description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance.
+ type: str
+ sample: "1.2.3.4"
+ ansible_ec2_network_interfaces_macs_<mac address>_security_group_ids:
+ description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC.
+ type: str
+ sample: "sg-01234567,sg-01234568"
+ ansible_ec2_network_interfaces_macs_<mac address>_security_groups:
+ description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC.
+ type: str
+ sample: "secgroup1,secgroup2"
+ ansible_ec2_network_interfaces_macs_<mac address>_subnet_id:
+ description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "subnet-01234567"
+ ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv4_cidr_block:
+ description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.1.0/24"
+ ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv6_cidr_blocks:
+ description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_network_interfaces_macs_<mac address>_vpc_id:
+ description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "vpc-0123456"
+ ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_block:
+ description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.0.0/16"
+ ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_blocks:
+ description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: "10.0.0.0/16"
+ ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv6_cidr_blocks:
+ description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
+ type: str
+ sample: ""
+ ansible_ec2_placement_availability_zone:
+ description: The Availability Zone in which the instance launched.
+ type: str
+ sample: "us-east-1a"
+ ansible_ec2_placement_region:
+ description: The Region in which the instance launched.
+ type: str
+ sample: "us-east-1"
+ ansible_ec2_product_codes:
+ description: Product codes associated with the instance, if any.
+ type: str
+ sample: "aw0evgkw8e5c1q413zgy5pjce"
+ ansible_ec2_profile:
+ description: EC2 instance hardware profile.
+ type: str
+ sample: "default-hvm"
+ ansible_ec2_public_hostname:
+ description:
+ - The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true.
+ type: str
+ sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
+ ansible_ec2_public_ipv4:
+ description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address.
+ type: str
+ sample: "1.2.3.4"
+ ansible_ec2_public_key:
+ description: Public key. Only available if supplied at instance launch time.
+ type: str
+ sample: ""
+ ansible_ec2_ramdisk_id:
+ description: The ID of the RAM disk specified at launch time, if applicable.
+ type: str
+ sample: ""
+ ansible_ec2_reservation_id:
+ description: The ID of the reservation.
+ type: str
+ sample: "r-0123456789abcdef0"
+ ansible_ec2_security_groups:
+ description:
+ - The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC.
+ - Such changes are reflected here and in network/interfaces/macs/mac/security-groups.
+ type: str
+ sample: "securitygroup1,securitygroup2"
+ ansible_ec2_services_domain:
+ description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1.
+ type: str
+ sample: "amazonaws.com"
+ ansible_ec2_services_partition:
+ description:
+ - The partition that the resource is in. For standard AWS regions, the partition is aws.
+ - If you have resources in other partitions, the partition is aws-partitionname.
+ - For example, the partition for resources in the China (Beijing) region is aws-cn.
+ type: str
+ sample: "aws"
+ ansible_ec2_spot_termination_time:
+ description:
+ - The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal.
+ - This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2.
+ - The termination-time item is not set to a time if you terminated the Spot instance yourself.
+ type: str
+ sample: "2015-01-05T18:02:00Z"
+ ansible_ec2_user_data:
+ description: The instance user data.
+ type: str
+ sample: "#!/bin/bash"
+'''
+
+import json
+import re
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import quote
+
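+# The instance metadata service is a link-local HTTP endpoint; a short global
+# socket timeout keeps an unreachable endpoint from hanging fact collection.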
+socket.setdefaulttimeout(5)
+
+
+class Ec2Metadata(object):
+ ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
+ ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
+ ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
+ ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/'
+
+ def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None):
+ self.module = module
+ self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
+ self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
+ self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
+ self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri
+ self._data = {}
+ self._prefix = 'ansible_ec2_%s'
+
+ def _fetch(self, url):
+ encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]')
+ response, info = fetch_url(self.module, encoded_url, force=True)
+
+ if info.get('status') not in (200, 404):
+            # request failed; back off briefly, then retry once before failing
+            time.sleep(3)
+ self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg']))
+ response, info = fetch_url(self.module, encoded_url, force=True)
+ if info.get('status') not in (200, 404):
+ # fail out now
+ self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info)
+ if response:
+ data = response.read()
+ else:
+ data = None
+ return to_text(data)
+
+ def _mangle_fields(self, fields, uri, filter_patterns=None):
+ filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns
+
+ new_fields = {}
+ for key, value in fields.items():
+ split_fields = key[len(uri):].split('/')
+ # Parse out the IAM role name (which is _not_ the same as the instance profile name)
+ if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]:
+ new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2]
+ if len(split_fields) > 1 and split_fields[1]:
+ new_key = "-".join(split_fields)
+ new_fields[self._prefix % new_key] = value
+ else:
+ new_key = "".join(split_fields)
+ new_fields[self._prefix % new_key] = value
+ for pattern in filter_patterns:
+ for key in dict(new_fields):
+ match = re.search(pattern, key)
+ if match:
+ new_fields.pop(key)
+ return new_fields
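+        # Illustrative walk-through (hypothetical key, default prefix):
+        #   uri = 'http://169.254.169.254/latest/meta-data/'
+        #   key = uri + 'placement/availability-zone'
+        #     -> split_fields = ['placement', 'availability-zone']
+        #     -> new_key = 'placement-availability-zone'
+        #     -> fact name 'ansible_ec2_placement-availability-zone'
+        # fix_invalid_varnames() later rewrites the dashes, yielding
+        # 'ansible_ec2_placement_availability_zone'.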
+
+ def fetch(self, uri, recurse=True):
+ raw_subfields = self._fetch(uri)
+ if not raw_subfields:
+ return
+ subfields = raw_subfields.split('\n')
+ for field in subfields:
+ if field.endswith('/') and recurse:
+ self.fetch(uri + field)
+ if uri.endswith('/'):
+ new_uri = uri + field
+ else:
+ new_uri = uri + '/' + field
+ if new_uri not in self._data and not new_uri.endswith('/'):
+ content = self._fetch(new_uri)
+ if field == 'security-groups' or field == 'security-group-ids':
+ sg_fields = ",".join(content.split('\n'))
+ self._data['%s' % (new_uri)] = sg_fields
+ else:
+                    try:
+                        json_dict = json.loads(content)  # avoid shadowing the dict builtin
+                        self._data['%s' % (new_uri)] = content
+                        for (key, value) in json_dict.items():
+                            self._data['%s:%s' % (new_uri, key.lower())] = value
+                    except Exception:
+                        self._data['%s' % (new_uri)] = content  # not a stringified JSON string
+
+ def fix_invalid_varnames(self, data):
+ """Change ':'' and '-' to '_' to ensure valid template variable names"""
+ new_data = data.copy()
+ for key, value in data.items():
+ if ':' in key or '-' in key:
+ newkey = re.sub(':|-', '_', key)
+ new_data[newkey] = value
+ del new_data[key]
+
+ return new_data
+
+ def run(self):
+ self.fetch(self.uri_meta) # populate _data with metadata
+ data = self._mangle_fields(self._data, self.uri_meta)
+ data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
+ data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
+
+ self._data = {} # clear out metadata in _data
+ self.fetch(self.uri_dynamic) # populate _data with dynamic data
+ dyndata = self._mangle_fields(self._data, self.uri_dynamic)
+ data.update(dyndata)
+ data = self.fix_invalid_varnames(data)
+
+ # Maintain old key for backwards compatibility
+ if 'ansible_ec2_instance_identity_document_region' in data:
+ data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region']
+ return data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ ec2_metadata_facts = Ec2Metadata(module).run()
+ ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts)
+
+ module.exit_json(**ec2_metadata_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
new file mode 100644
index 00000000..cf4762dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot
+version_added: 1.0.0
+short_description: Creates a snapshot from an existing volume
+description:
+ - Creates an EC2 snapshot from an existing EBS volume.
+options:
+ volume_id:
+ description:
+ - Volume from which to take the snapshot.
+ required: false
+ type: str
+ description:
+ description:
+ - Description to be applied to the snapshot.
+ required: false
+ type: str
+ instance_id:
+ description:
+ - Instance that has the required volume to snapshot mounted.
+ required: false
+ type: str
+ device_name:
+ description:
+ - Device name of a mounted volume to be snapshotted.
+ required: false
+ type: str
+ snapshot_tags:
+ description:
+ - A dictionary of tags to add to the snapshot.
+ type: dict
+ required: false
+ wait:
+ description:
+ - Wait for the snapshot to be ready.
+ type: bool
+ required: false
+ default: yes
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ - Specify 0 to wait forever.
+ required: false
+ default: 0
+ type: int
+ state:
+ description:
+      - Whether to create (C(present)) or delete (C(absent)) the snapshot.
+ required: false
+ default: present
+ choices: ['absent', 'present']
+ type: str
+ snapshot_id:
+ description:
+ - Snapshot id to remove.
+ required: false
+ type: str
+ last_snapshot_min_age:
+ description:
+      - If the volume's most recent snapshot was started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
+ required: false
+ default: 0
+ type: int
+
+author: "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Simple snapshot of volume using volume_id
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume mounted on device_name attached to instance_id
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume with tagging
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ snapshot_tags:
+ frequency: hourly
+ source: /data
+
+# Remove a snapshot
+- amazon.aws.ec2_snapshot:
+ snapshot_id: snap-abcd1234
+ state: absent
+
+# Create a snapshot only if the most recent one is older than 1 hour
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ last_snapshot_min_age: 60
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'Name': 'instance-name' }"
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+'''
+
+import time
+import datetime
+
+try:
+ import boto.exception
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import ec2_connect
+
+
+# Helpers for locating the most recent snapshot of a volume
+def _get_snapshot_starttime(snap):
+ return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
+
+
+def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
+ """
+ Gets the most recently created snapshot and optionally filters the result
+ if the snapshot is too old
+ :param snapshots: list of snapshots to search
+ :param max_snapshot_age_secs: filter the result if its older than this
+ :param now: simulate time -- used for unit testing
+ :return:
+ """
+ if len(snapshots) == 0:
+ return None
+
+ if not now:
+ now = datetime.datetime.utcnow()
+
+ youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
+
+    # See if the snapshot is younger than the given max age
+    snapshot_start = _get_snapshot_starttime(youngest_snapshot)
+ snapshot_age = now - snapshot_start
+
+ if max_snapshot_age_secs is not None:
+ if snapshot_age.total_seconds() > max_snapshot_age_secs:
+ return None
+
+ return youngest_snapshot
+
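+# Worked example (hypothetical values): with last_snapshot_min_age=60 the caller
+# passes max_snapshot_age_secs=3600, so a snapshot started 45 minutes ago is
+# returned for reuse while one started two hours ago yields None.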
+
+def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
+ """
+ Wait for the snapshot to be created
+ :param snapshot:
+ :param wait_timeout_secs: fail this step after this many seconds
+ :param sleep_func:
+ :return:
+ """
+ time_waited = 0
+ snapshot.update()
+ while snapshot.status != 'completed':
+ sleep_func(3)
+ snapshot.update()
+ time_waited += 3
+ if wait_timeout_secs and time_waited > wait_timeout_secs:
+ return False
+ return True
+
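+# _create_with_wait() polls every 3 seconds; a wait_timeout_secs of 0 disables
+# the timeout, so the loop runs until the snapshot reports 'completed'.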
+
+def create_snapshot(module, ec2, state=None, description=None, wait=None,
+ wait_timeout=None, volume_id=None, instance_id=None,
+ snapshot_id=None, device_name=None, snapshot_tags=None,
+ last_snapshot_min_age=None):
+ snapshot = None
+ changed = False
+
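+    # Exactly one of volume_id / snapshot_id / instance_id may be supplied;
+    # the count of None entries equals len(required) - 1 only in that case.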
+ required = [volume_id, snapshot_id, instance_id]
+ if required.count(None) != len(required) - 1: # only 1 must be set
+ module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
+    if (instance_id and not device_name) or (device_name and not instance_id):
+ module.fail_json(msg='Instance ID and device name must both be specified')
+
+ if instance_id:
+ try:
+ volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e)
+
+ if not volumes:
+ module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
+
+ volume_id = volumes[0].id
+
+ if state == 'absent':
+ if not snapshot_id:
+ module.fail_json(msg='snapshot_id must be set when state is absent')
+ try:
+ ec2.delete_snapshot(snapshot_id)
+ except boto.exception.BotoServerError as e:
+ # exception is raised if snapshot does not exist
+ if e.error_code == 'InvalidSnapshot.NotFound':
+ module.exit_json(changed=False)
+ else:
+ module.fail_json_aws(e)
+
+ # successful delete
+ module.exit_json(changed=True)
+
+ if last_snapshot_min_age > 0:
+ try:
+ current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e)
+
+ last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
+ snapshot = _get_most_recent_snapshot(current_snapshots,
+ max_snapshot_age_secs=last_snapshot_min_age)
+ try:
+ # Create a new snapshot if we didn't find an existing one to use
+ if snapshot is None:
+ snapshot = ec2.create_snapshot(volume_id, description=description)
+ changed = True
+ if wait:
+ if not _create_with_wait(snapshot, wait_timeout):
+ module.fail_json(msg='Timed out while creating snapshot.')
+ if snapshot_tags:
+ for k, v in snapshot_tags.items():
+ snapshot.add_tag(k, v)
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=changed,
+ snapshot_id=snapshot.id,
+ volume_id=snapshot.volume_id,
+ volume_size=snapshot.volume_size,
+ tags=snapshot.tags.copy())
+
+
+def create_snapshot_ansible_module():
+ argument_spec = dict(
+ volume_id=dict(),
+ description=dict(),
+ instance_id=dict(),
+ snapshot_id=dict(),
+ device_name=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=0),
+ last_snapshot_min_age=dict(type='int', default=0),
+ snapshot_tags=dict(type='dict', default=dict()),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)
+ return module
+
+
+def main():
+ module = create_snapshot_ansible_module()
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ volume_id = module.params.get('volume_id')
+ snapshot_id = module.params.get('snapshot_id')
+ description = module.params.get('description')
+ instance_id = module.params.get('instance_id')
+ device_name = module.params.get('device_name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ last_snapshot_min_age = module.params.get('last_snapshot_min_age')
+ snapshot_tags = module.params.get('snapshot_tags')
+ state = module.params.get('state')
+
+ ec2 = ec2_connect(module)
+
+ create_snapshot(
+ module=module,
+ state=state,
+ description=description,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ ec2=ec2,
+ volume_id=volume_id,
+ instance_id=instance_id,
+ snapshot_id=snapshot_id,
+ device_name=device_name,
+ snapshot_tags=snapshot_tags,
+ last_snapshot_min_age=last_snapshot_min_age
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py
new file mode 100644
index 00000000..d2b29f04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_facts.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_info
+version_added: 1.0.0
+short_description: Gather information about ec2 volume snapshots in AWS
+description:
+ - Gather information about ec2 volume snapshots in AWS.
+ - This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ snapshot_ids:
+ description:
+ - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ owner_ids:
+ description:
+ - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
+ access are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ restorable_by_user_ids:
+ description:
+ - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
+ returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ type: dict
+ default: {}
+notes:
+ - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
+ the account use the filter 'owner-id'.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all snapshots, including public ones
+- amazon.aws.ec2_snapshot_info:
+
+# Gather information about all snapshots owned by the account 0123456789
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ owner-id: 0123456789
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ owner_ids:
+ - 0123456789
+
+# Gather information about a particular snapshot using ID
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ snapshot-id: snap-00112233
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ snapshot_ids:
+ - snap-00112233
+
+# Gather information about any snapshot with a tag key Name and value Example
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any snapshot with an error status
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ status: error
+
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+state:
+ description: The snapshot state (completed, pending or error).
+ type: str
+ returned: always
+ sample: completed
+state_message:
+ description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
+ AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
+ error occurred.
+ type: str
+ returned: always
+ sample:
+start_time:
+ description: The time stamp when the snapshot was initiated.
+ type: str
+ returned: always
+ sample: "2015-02-12T02:14:02+00:00"
+progress:
+ description: The progress of the snapshot, as a percentage.
+ type: str
+ returned: always
+ sample: "100%"
+owner_id:
+ description: The AWS account ID of the EBS snapshot owner.
+ type: str
+ returned: always
+ sample: "099720109477"
+description:
+ description: The description for the snapshot.
+ type: str
+ returned: always
+ sample: "My important backup"
+volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+owner_alias:
+ description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
+ type: str
+ returned: always
+ sample: "033440102211"
+tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+encrypted:
+ description: Indicates whether the snapshot is encrypted.
+ type: bool
+ returned: always
+ sample: "True"
+kms_key_id:
+  description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to
+    protect the volume encryption key for the parent volume.
+ type: str
+ returned: always
+ sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
+data_encryption_key_id:
+  description: The data encryption key identifier for the snapshot. This value is a unique identifier that
+    corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
+ type: str
+ returned: always
+ sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
+
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_snapshots(connection, module):
+
+ snapshot_ids = module.params.get("snapshot_ids")
+ owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
+ restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ snapshots = connection.describe_snapshots(
+ aws_retry=True,
+ SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
+ RestorableByUserIds=restorable_by_user_ids, Filters=filters)
+ except is_boto3_error_code('InvalidSnapshot.NotFound') as e:
+ if len(snapshot_ids) > 1:
+ module.warn("Some of your snapshots may exist, but %s" % str(e))
+ snapshots = {'Snapshots': []}
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe snapshots')
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_snapshots = []
+ for snapshot in snapshots['Snapshots']:
+ snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for snapshot in snaked_snapshots:
+ if 'tags' in snapshot:
+ snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
+
+ module.exit_json(snapshots=snaked_snapshots)
+
+
+def main():
+
+ argument_spec = dict(
+ snapshot_ids=dict(default=[], type='list', elements='str'),
+ owner_ids=dict(default=[], type='list', elements='str'),
+ restorable_by_user_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
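+    # The four selection mechanisms are mutually exclusive: at most one of
+    # snapshot_ids, owner_ids, restorable_by_user_ids or filters may be used.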
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
+ ],
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_snapshot_facts':
+ module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_snapshots(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py
new file mode 100644
index 00000000..d2b29f04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_info
+version_added: 1.0.0
+short_description: Gather information about ec2 volume snapshots in AWS
+description:
+ - Gather information about ec2 volume snapshots in AWS.
+ - This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ snapshot_ids:
+ description:
+ - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ owner_ids:
+ description:
+ - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
+ access are returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ restorable_by_user_ids:
+ description:
+ - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
+ returned.
+ required: false
+ default: []
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ type: dict
+ default: {}
+notes:
+ - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
+ the account use the filter 'owner-id'.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all snapshots, including public ones
+- amazon.aws.ec2_snapshot_info:
+
+# Gather information about all snapshots owned by the account 0123456789
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ owner-id: 0123456789
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ owner_ids:
+ - 0123456789
+
+# Gather information about a particular snapshot using ID
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ snapshot-id: snap-00112233
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+ snapshot_ids:
+ - snap-00112233
+
+# Gather information about any snapshot with a tag key Name and value Example
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any snapshot with an error status
+- amazon.aws.ec2_snapshot_info:
+ filters:
+ status: error
+
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+state:
+ description: The snapshot state (completed, pending or error).
+ type: str
+ returned: always
+ sample: completed
+state_message:
+ description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
+ AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
+ error occurred.
+ type: str
+ returned: always
+ sample:
+start_time:
+ description: The time stamp when the snapshot was initiated.
+ type: str
+ returned: always
+ sample: "2015-02-12T02:14:02+00:00"
+progress:
+ description: The progress of the snapshot, as a percentage.
+ type: str
+ returned: always
+ sample: "100%"
+owner_id:
+ description: The AWS account ID of the EBS snapshot owner.
+ type: str
+ returned: always
+ sample: "099720109477"
+description:
+ description: The description for the snapshot.
+ type: str
+ returned: always
+ sample: "My important backup"
+volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+owner_alias:
+ description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
+ type: str
+ returned: always
+ sample: "033440102211"
+tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+encrypted:
+ description: Indicates whether the snapshot is encrypted.
+ type: bool
+ returned: always
+ sample: "True"
+kms_key_id:
+  description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to
+    protect the volume encryption key for the parent volume.
+ type: str
+ returned: always
+ sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
+data_encryption_key_id:
+  description: The data encryption key identifier for the snapshot. This value is a unique identifier that
+    corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
+ type: str
+ returned: always
+ sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
+
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_snapshots(connection, module):
+
+ snapshot_ids = module.params.get("snapshot_ids")
+ owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
+ restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ snapshots = connection.describe_snapshots(
+ aws_retry=True,
+ SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
+ RestorableByUserIds=restorable_by_user_ids, Filters=filters)
+ except is_boto3_error_code('InvalidSnapshot.NotFound') as e:
+ if len(snapshot_ids) > 1:
+ module.warn("Some of your snapshots may exist, but %s" % str(e))
+ snapshots = {'Snapshots': []}
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Failed to describe snapshots')
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_snapshots = []
+ for snapshot in snapshots['Snapshots']:
+ snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for snapshot in snaked_snapshots:
+ if 'tags' in snapshot:
+ snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
+
+ module.exit_json(snapshots=snaked_snapshots)
+
+
+def main():
+
+ argument_spec = dict(
+ snapshot_ids=dict(default=[], type='list', elements='str'),
+ owner_ids=dict(default=[], type='list', elements='str'),
+ restorable_by_user_ids=dict(default=[], type='list', elements='str'),
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
+ ],
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_snapshot_facts':
+ module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ list_ec2_snapshots(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py
new file mode 100644
index 00000000..1d8a1e6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_tag
+version_added: 1.0.0
+short_description: create and remove tags on ec2 resources
+description:
+ - Creates, modifies and removes tags for any EC2 resource.
+ - Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX).
+ - This module is designed to be used with complex args (tags), see the examples.
+requirements: [ "boto3", "botocore" ]
+options:
+ resource:
+ description:
+ - The EC2 resource id.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the tags should be present or absent on the resource.
+ - The use of I(state=list) to interrogate the tags of an instance has been
+ deprecated and will be removed after 2022-06-01. The 'list'
+ functionality has been moved to a dedicated module M(amazon.aws.ec2_tag_info).
+ default: present
+ choices: ['present', 'absent', 'list']
+ type: str
+ tags:
+ description:
+ - A dictionary of tags to add or remove from the resource.
+ - If the value provided for a key is not set and I(state=absent), the tag will be removed regardless of its current value.
+ - Required when I(state=present) or I(state=absent).
+ type: dict
+ purge_tags:
+ description:
+ - Whether unspecified tags should be removed from the resource.
+ - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
+ type: bool
+ default: false
+
+author:
+ - Lester Wade (@lwade)
+ - Paul Arthur (@flowerysong)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Ensure tags are present on a resource
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: vol-XXXXXX
+ state: present
+ tags:
+ Name: ubervol
+ env: prod
+
+- name: Ensure all volumes are tagged
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: '{{ item.id }}'
+ state: present
+ tags:
+ Name: dbserver
+ Env: production
+ loop: '{{ ec2_vol.volumes }}'
+
+- name: Remove the Env tag
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Env:
+ state: absent
+
+- name: Remove the Env tag if it's currently 'development'
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Env: development
+ state: absent
+
+- name: Remove all tags except for Name from an instance
+ amazon.aws.ec2_tag:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ tags:
+ Name: ''
+ state: absent
+ purge_tags: true
+'''
+
+RETURN = '''
+tags:
+ description: A dict containing the tags on the resource
+ returned: always
+ type: dict
+added_tags:
+ description: A dict of tags that were added to the resource
+ returned: If tags were added
+ type: dict
+removed_tags:
+ description: A dict of tags that were removed from the resource
+ returned: If tags were removed
+ type: dict
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+
+
+def get_tags(ec2, module, resource):
+ filters = [{'Name': 'resource-id', 'Values': [resource]}]
+ try:
+ result = AWSRetry.jittered_backoff()(ec2.describe_tags)(Filters=filters)
+ return boto3_tag_list_to_ansible_dict(result['Tags'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+
+
+def main():
+ argument_spec = dict(
+ resource=dict(required=True),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'list']),
+ )
+ required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
+
+ resource = module.params['resource']
+ tags = module.params['tags']
+ state = module.params['state']
+ purge_tags = module.params['purge_tags']
+
+ result = {'changed': False}
+
+ ec2 = module.client('ec2')
+
+ current_tags = get_tags(ec2, module, resource)
+
+ if state == 'list':
+ module.deprecate(
+ 'Using the "list" state has been deprecated. Please use the ec2_tag_info module instead', date='2022-06-01', collection_name='amazon.aws')
+ module.exit_json(changed=False, tags=current_tags)
+
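+    # compare_aws_tags() returns the tags that need setting (new keys and keys
+    # whose values differ) plus, when purge_tags is true, the existing keys
+    # that were not specified and should therefore be removed.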
+ add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
+
+ remove_tags = {}
+ if state == 'absent':
+ for key in tags:
+ if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
+ remove_tags[key] = current_tags[key]
+
+ for key in remove:
+ remove_tags[key] = current_tags[key]
+
+ if remove_tags:
+ result['changed'] = True
+ result['removed_tags'] = remove_tags
+ if not module.check_mode:
+ try:
+ AWSRetry.jittered_backoff()(ec2.delete_tags)(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+
+ if state == 'present' and add_tags:
+ result['changed'] = True
+ result['added_tags'] = add_tags
+ current_tags.update(add_tags)
+ if not module.check_mode:
+ try:
+ AWSRetry.jittered_backoff()(ec2.create_tags)(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+
+ result['tags'] = get_tags(ec2, module, resource)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py
new file mode 100644
index 00000000..947ce363
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_tag_info
+version_added: 1.0.0
+short_description: list tags on ec2 resources
+description:
+ - Lists tags for any EC2 resource.
+ - Resources are referenced by their resource id (e.g. an instance being i-XXXXXXX, a vpc being vpc-XXXXXX).
+ - Resource tags can be managed using the M(amazon.aws.ec2_tag) module.
+requirements: [ "boto3", "botocore" ]
+options:
+ resource:
+ description:
+ - The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX).
+ required: true
+ type: str
+
+author:
+ - Mark Chappell (@tremble)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+- name: Retrieve all tags on an instance
+ amazon.aws.ec2_tag_info:
+ region: eu-west-1
+ resource: i-xxxxxxxxxxxxxxxxx
+ register: instance_tags
+
+- name: Retrieve all tags on a VPC
+ amazon.aws.ec2_tag_info:
+ region: eu-west-1
+ resource: vpc-xxxxxxxxxxxxxxxxx
+ register: vpc_tags
+'''
+
+RETURN = '''
+tags:
+ description: A dict containing the tags on the resource
+ returned: always
+ type: dict
+'''
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except Exception:
+ pass # Handled by AnsibleAWSModule
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict, AWSRetry
+
+
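+# Wrapped in jittered exponential backoff so transient EC2 API throttling
+# (e.g. RequestLimitExceeded) is retried instead of failing the module.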
+@AWSRetry.jittered_backoff()
+def get_tags(ec2, module, resource):
+ filters = [{'Name': 'resource-id', 'Values': [resource]}]
+ return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags'])
+
+
+def main():
+ argument_spec = dict(
+ resource=dict(required=True),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ resource = module.params['resource']
+ ec2 = module.client('ec2')
+
+ try:
+ current_tags = get_tags(ec2, module, resource)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+
+ module.exit_json(changed=False, tags=current_tags)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
new file mode 100644
index 00000000..fb85a85d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
@@ -0,0 +1,809 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol
+version_added: 1.0.0
+short_description: Create and attach a volume, return volume id and device map
+description:
+ - Creates an EBS volume and optionally attaches it to an instance.
+ - If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made.
+  - This module has a dependency on boto3.
+options:
+ instance:
+ description:
+ - Instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
+ type: str
+ name:
+ description:
+ - Volume Name tag if you wish to attach an existing volume (requires instance)
+ type: str
+ id:
+ description:
+ - Volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
+ type: str
+ volume_size:
+ description:
+ - Size of volume (in GiB) to create.
+ type: int
+ volume_type:
+ description:
+ - Type of EBS volume; standard (magnetic), gp2 (SSD), gp3 (SSD), io1 (Provisioned IOPS), io2 (Provisioned IOPS),
+ st1 (Throughput Optimized HDD), sc1 (Cold HDD).
+ "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
+ default: standard
+ choices: ['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']
+ type: str
+ iops:
+ description:
+      - The provisioned IOPS you want to associate with this volume (integer).
+ - By default AWS will set this to 100.
+ type: int
+ encrypted:
+ description:
+ - Enable encryption at rest for this volume.
+ default: false
+ type: bool
+ kms_key_id:
+ description:
+ - Specify the id of the KMS key to use.
+ type: str
+ device_name:
+ description:
+ - Device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
+ type: str
+ delete_on_termination:
+ description:
+ - When set to C(true), the volume will be deleted upon instance termination.
+ type: bool
+ default: false
+ zone:
+ description:
+ - Zone in which to create the volume, if unset uses the zone the instance is in (if set).
+ aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
+ type: str
+ snapshot:
+ description:
+ - Snapshot ID on which to base the volume.
+ type: str
+ validate_certs:
+ description:
+ - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether to ensure the volume is present or absent.
+ - The use of I(state=list) to interrogate the volume has been deprecated
+ and will be removed after 2022-06-01. The 'list' functionality
+ has been moved to a dedicated module M(amazon.aws.ec2_vol_info).
+ default: present
+ choices: ['absent', 'present', 'list']
+ type: str
+ tags:
+ description:
+ - tag:value pairs to add to the volume after creation.
+ default: {}
+ type: dict
+ modify_volume:
+ description:
+      - The volume won't be modified unless this key is set to C(true).
+ type: bool
+ default: false
+ version_added: 1.4.0
+ throughput:
+ description:
+ - Volume throughput in MB/s.
+ - This parameter is only valid for gp3 volumes.
+ - Valid range is from 125 to 1000.
+ type: int
+ version_added: 1.4.0
+author: "Lester Wade (@lwade)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements: [ boto3>=1.16.33 ]
+'''
+
+EXAMPLES = '''
+# Simple attachment action
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 5
+ device_name: sdd
+ region: us-west-2
+
+# Example using custom iops params
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 5
+ iops: 100
+ device_name: sdd
+ region: us-west-2
+
+# Example using snapshot id
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ snapshot: "{{ snapshot }}"
+
+# Playbook example combined with instance launch
+- amazon.aws.ec2:
+ keypair: "{{ keypair }}"
+ image: "{{ image }}"
+ wait: yes
+ count: 3
+ register: ec2
+- amazon.aws.ec2_vol:
+ instance: "{{ item.id }}"
+ volume_size: 5
+ loop: "{{ ec2.instances }}"
+ register: ec2_vol
+
+# Example: Launch an instance and then add a volume if not already attached
+# * Volume will be created with the given name if not already created.
+# * Nothing will happen if the volume is already attached.
+# * Requires Ansible 2.0
+
+- amazon.aws.ec2:
+ keypair: "{{ keypair }}"
+ image: "{{ image }}"
+ zone: YYYYYY
+ id: my_instance
+ wait: yes
+ count: 1
+ register: ec2
+
+- amazon.aws.ec2_vol:
+ instance: "{{ item.id }}"
+ name: my_existing_volume_Name_tag
+ device_name: /dev/xvdf
+ loop: "{{ ec2.instances }}"
+ register: ec2_vol
+
+# Remove a volume
+- amazon.aws.ec2_vol:
+ id: vol-XXXXXXXX
+ state: absent
+
+# Detach a volume (since 1.9)
+- amazon.aws.ec2_vol:
+ id: vol-XXXXXXXX
+ instance: None
+ region: us-west-2
+
+# List volumes for an instance
+- amazon.aws.ec2_vol:
+ instance: i-XXXXXX
+ state: list
+ region: us-west-2
+
+# Create new volume using SSD storage
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ volume_size: 50
+ volume_type: gp2
+ device_name: /dev/xvdf
+
+# Attach an existing volume to instance. The volume will be deleted upon instance termination.
+- amazon.aws.ec2_vol:
+ instance: XXXXXX
+ id: XXXXXX
+ device_name: /dev/sdf
+ delete_on_termination: yes
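+
+# Create a new gp3 volume with a custom throughput (illustrative values;
+# throughput is only valid for gp3 volumes)
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 50
+    volume_type: gp3
+    throughput: 250
+    device_name: /dev/xvdf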
+'''
+
+RETURN = '''
+device:
+ description: device name of attached volume
+ returned: when success
+ type: str
+ sample: "/def/sdf"
+volume_id:
+ description: the id of volume
+ returned: when success
+ type: str
+ sample: "vol-35b333d9"
+volume_type:
+ description: the volume type
+ returned: when success
+ type: str
+ sample: "standard"
+volume:
+ description: a dictionary containing detailed attributes of the volume
+ returned: when success
+  type: dict
+ sample: {
+ "attachment_set": {
+ "attach_time": "2015-10-23T00:22:29.000Z",
+ "deleteOnTermination": "false",
+ "device": "/dev/sdf",
+ "instance_id": "i-8356263c",
+ "status": "attached"
+ },
+ "create_time": "2015-10-21T14:36:08.870Z",
+ "encrypted": false,
+ "id": "vol-35b333d9",
+ "iops": null,
+ "size": 1,
+ "snapshot_id": "",
+ "status": "in-use",
+ "tags": {
+ "env": "dev"
+ },
+ "type": "standard",
+ "zone": "us-east-1b"
+ }
+'''
+
+import time
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import camel_dict_to_snake_dict
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.core import is_boto3_error_code
+
+try:
+ import botocore
+except ImportError:
+ pass # Taken care of by AnsibleAWSModule
+
+
+def get_instance(module, ec2_conn, instance_id=None):
+ instance = None
+ if not instance_id:
+ return instance
+
+ try:
+ reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id])
+ instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while getting instance with id {0}'.format(instance_id))
+
+ return instance
+
+
+def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
+ name = module.params.get('name')
+ param_id = module.params.get('id')
+ zone = module.params.get('zone')
+
+ if not vol_id:
+ vol_id = param_id
+
+ # If no name or id supplied, just try volume creation based on module parameters
+ if vol_id is None and name is None:
+ return None
+
+ find_params = dict()
+ vols = []
+
+ if vol_id:
+ find_params['VolumeIds'] = [vol_id]
+ elif name:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name})
+ elif zone:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone})
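+    # The lookups above are mutually exclusive: an explicit volume id takes
+    # precedence over a Name tag, which in turn takes precedence over a bare
+    # availability-zone filter.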
+
+ try:
+ paginator = ec2_conn.get_paginator('describe_volumes')
+ vols_response = paginator.paginate(**find_params)
+        vols = vols_response.build_full_result().get('Volumes', [])
+    except is_boto3_error_code('InvalidVolume.NotFound'):
+        module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params))
+
+ if not vols:
+ if fail_on_not_found and vol_id:
+ msg = "Could not find volume with id: {0}".format(vol_id)
+ if name:
+ msg += (" and name: {0}".format(name))
+ module.fail_json(msg=msg)
+ else:
+ return None
+
+ if len(vols) > 1:
+ module.fail_json(
+ msg="Found more than one volume in zone (if specified) with name: {0}".format(name),
+ found=[v['VolumeId'] for v in vols]
+ )
+ vol = camel_dict_to_snake_dict(vols[0])
+ return vol
+
+
+def get_volumes(module, ec2_conn):
+ instance = module.params.get('instance')
+
+ find_params = dict()
+ if instance:
+ find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance})
+
+ vols = []
+ try:
+ vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params)
+ vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while getting EBS volumes')
+ return vols
+
+
+def delete_volume(module, ec2_conn, volume_id=None):
+ changed = False
+ if volume_id:
+ try:
+ ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id)
+ changed = True
+ except is_boto3_error_code('InvalidVolume.NotFound'):
+ module.exit_json(changed=False)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Error while deleting volume')
+ return changed
+
+
+def update_volume(module, ec2_conn, volume):
+ changed = False
+ req_obj = {'VolumeId': volume['volume_id']}
+
+ if module.params.get('modify_volume'):
+ iops_changed = False
+ if volume['volume_type'] != 'standard':
+ target_iops = module.params.get('iops')
+ if target_iops:
+ original_iops = volume['iops']
+ if target_iops != original_iops:
+ iops_changed = True
+                    req_obj['Iops'] = target_iops
+
+ target_size = module.params.get('volume_size')
+ size_changed = False
+ if target_size:
+ original_size = volume['size']
+ if target_size != original_size:
+ size_changed = True
+                req_obj['Size'] = target_size
+
+ target_type = module.params.get('volume_type')
+ original_type = None
+ type_changed = False
+ if target_type:
+ original_type = volume['volume_type']
+ if target_type != original_type:
+ type_changed = True
+ req_obj['VolumeType'] = target_type
+
+ target_throughput = module.params.get('throughput')
+ throughput_changed = False
+ if 'gp3' in [target_type, original_type]:
+ if target_throughput:
+ original_throughput = volume.get('throughput')
+ if target_throughput != original_throughput:
+ throughput_changed = True
+ req_obj['Throughput'] = target_throughput
+
+ changed = iops_changed or size_changed or type_changed or throughput_changed
+
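+        # A single ModifyVolume call can change size, volume type, IOPS and
+        # throughput together; note that AWS typically enforces a cooldown of
+        # several hours between successive modifications of the same volume.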
+ if changed:
+            response = ec2_conn.modify_volume(aws_retry=True, **req_obj)
+
+ volume['size'] = response.get('VolumeModification').get('TargetSize')
+ volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType')
+ volume['iops'] = response.get('VolumeModification').get('TargetIops')
+ volume['throughput'] = response.get('VolumeModification').get('TargetThroughput')
+
+ return volume, changed
+
+
+def create_volume(module, ec2_conn, zone):
+ changed = False
+ iops = module.params.get('iops')
+ encrypted = module.params.get('encrypted')
+ kms_key_id = module.params.get('kms_key_id')
+ volume_size = module.params.get('volume_size')
+ volume_type = module.params.get('volume_type')
+ snapshot = module.params.get('snapshot')
+ throughput = module.params.get('throughput')
+ # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
+ if iops:
+ volume_type = 'io1'
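+    # Note: gp3 and io2 volumes also accept custom IOPS; the coercion to io1
+    # above is presumably kept for backwards compatibility with playbooks
+    # where setting iops implied an io1 volume.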
+
+ volume = get_volume(module, ec2_conn)
+
+ if volume is None:
+
+ try:
+ changed = True
+ additional_params = dict()
+
+ if volume_size:
+ additional_params['Size'] = int(volume_size)
+
+ if kms_key_id:
+ additional_params['KmsKeyId'] = kms_key_id
+
+ if snapshot:
+ additional_params['SnapshotId'] = snapshot
+
+ if iops:
+ additional_params['Iops'] = int(iops)
+
+ if throughput:
+ additional_params['Throughput'] = int(throughput)
+
+ create_vol_response = ec2_conn.create_volume(
+ aws_retry=True,
+ AvailabilityZone=zone,
+ Encrypted=encrypted,
+ VolumeType=volume_type,
+ **additional_params
+ )
+
+ waiter = ec2_conn.get_waiter('volume_available')
+ waiter.wait(
+ VolumeIds=[create_vol_response['VolumeId']],
+ )
+ volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while creating EBS volume')
+
+ return volume, changed
+
+
+def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name):
+ changed = False
+
+ # If device_name isn't set, make a choice based on best practices here:
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
+
+ # In future this needs to be more dynamic but combining block device mapping best practices
+ # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
+
+ attachment_data = get_attachment_data(volume_dict, wanted_state='attached')
+ if attachment_data:
+ if attachment_data.get('instance_id', None) != instance_dict['instance_id']:
+ module.fail_json(msg="Volume {0} is already attached to another instance: {1}".format(volume_dict['volume_id'],
+ attachment_data.get('instance_id', None)))
+ else:
+ return volume_dict, changed
+
+ try:
+ attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name,
+ InstanceId=instance_dict['instance_id'],
+ VolumeId=volume_dict['volume_id'])
+
+ waiter = ec2_conn.get_waiter('volume_in_use')
+ waiter.wait(VolumeIds=[attach_response['VolumeId']])
+ changed = True
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Error while attaching EBS volume')
+
+ modify_dot_attribute(module, ec2_conn, instance_dict, device_name)
+
+ volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id'])
+ return volume, changed
+
+
+def modify_dot_attribute(module, ec2_conn, instance_dict, device_name):
+ """ Modify delete_on_termination attribute """
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ changed = False
+
+ # volume_in_use can return *shortly* before it appears on the instance
+ # description
+ mapped_block_device = None
+ _attempt = 0
+ while mapped_block_device is None:
+ _attempt += 1
+ instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id'])
+ mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name)
+ if mapped_block_device is None:
+ if _attempt > 2:
+ module.fail_json(msg='Unable to find device on instance',
+ device=device_name, instance=instance_dict)
+ time.sleep(1)
+
+ if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'):
+ try:
+ ec2_conn.modify_instance_attribute(
+ aws_retry=True,
+ InstanceId=instance_dict['instance_id'],
+ BlockDeviceMappings=[{
+ "DeviceName": device_name,
+ "Ebs": {
+ "DeleteOnTermination": delete_on_termination
+ }
+ }]
+ )
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e,
+ msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id']))
+
+ return changed
+
+
+def get_attachment_data(volume_dict, wanted_state=None):
+    attachment_data = {}
+    if not volume_dict:
+        return attachment_data
+
+    for data in volume_dict.get('attachments', []):
+        if wanted_state:
+            # Only return an attachment that is in the requested state
+            if data['state'] == wanted_state:
+                attachment_data = data
+                break
+        else:
+            # No state filter requested, so return the first attachment
+            attachment_data = data
+            break
+
+ return attachment_data
+
+
+def detach_volume(module, ec2_conn, volume_dict):
+ changed = False
+
+ attachment_data = get_attachment_data(volume_dict, wanted_state='attached')
+ if attachment_data:
+ ec2_conn.detach_volume(aws_retry=True, VolumeId=volume_dict['volume_id'])
+ waiter = ec2_conn.get_waiter('volume_available')
+ waiter.wait(
+ VolumeIds=[volume_dict['volume_id']],
+ )
+ changed = True
+
+ volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id'])
+ return volume_dict, changed
+
+
+def get_volume_info(volume, tags=None):
+ if not tags:
+ tags = boto3_tag_list_to_ansible_dict(volume.get('tags'))
+ attachment_data = get_attachment_data(volume)
+ volume_info = {
+ 'create_time': volume.get('create_time'),
+ 'encrypted': volume.get('encrypted'),
+ 'id': volume.get('volume_id'),
+ 'iops': volume.get('iops'),
+ 'size': volume.get('size'),
+ 'snapshot_id': volume.get('snapshot_id'),
+ 'status': volume.get('state'),
+ 'type': volume.get('volume_type'),
+ 'zone': volume.get('availability_zone'),
+ 'throughput': volume.get('throughput'),
+ 'attachment_set': {
+ 'attach_time': attachment_data.get('attach_time', None),
+ 'device': attachment_data.get('device', None),
+ 'instance_id': attachment_data.get('instance_id', None),
+ 'status': attachment_data.get('state', None),
+ 'deleteOnTermination': attachment_data.get('delete_on_termination', None)
+ },
+ 'tags': tags
+ }
+
+ return volume_info
+
+
+def get_mapped_block_device(instance_dict=None, device_name=None):
+ mapped_block_device = None
+ if not instance_dict:
+ return mapped_block_device
+ if not device_name:
+ return mapped_block_device
+
+ for device in instance_dict.get('block_device_mappings', []):
+ if device['device_name'] == device_name:
+ mapped_block_device = device
+ break
+
+ return mapped_block_device
+
+
+def ensure_tags(module, connection, res_id, res_type, tags, add_only):
+ changed = False
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': res_id, 'resource-type': res_type})
+ cur_tags = None
+ try:
+ cur_tags = connection.describe_tags(aws_retry=True, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ purge_tags = bool(not add_only)
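+    # compare_aws_tags() returns the tags that must be (re)set and the keys
+    # that must be removed to converge on the requested tags; with
+    # purge_tags=False (add_only) nothing is scheduled for removal.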
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+ final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
+
+ if to_update:
+ try:
+ if module.check_mode:
+                # in check mode, just compute the tags that would result
+ final_tags.update(to_update)
+ else:
+ connection.create_tags(
+ aws_retry=True,
+ Resources=[res_id],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if module.check_mode:
+                # in check mode, just drop the keys from the computed result
+ for key in to_delete:
+ del final_tags[key]
+ else:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ connection.delete_tags(aws_retry=True, Resources=[res_id], Tags=tags_list)
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if not module.check_mode and (to_update or to_delete):
+ try:
+ response = connection.describe_tags(aws_retry=True, Filters=filters)
+ final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ return final_tags, changed
+
+
+def main():
+ argument_spec = dict(
+ instance=dict(),
+ id=dict(),
+ name=dict(),
+ volume_size=dict(type='int'),
+ volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']),
+ iops=dict(type='int'),
+ encrypted=dict(default=False, type='bool'),
+ kms_key_id=dict(),
+ device_name=dict(),
+ delete_on_termination=dict(default=False, type='bool'),
+ zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
+ snapshot=dict(),
+ state=dict(default='present', choices=['absent', 'present', 'list']),
+ tags=dict(default={}, type='dict'),
+ modify_volume=dict(default=False, type='bool'),
+ throughput=dict(type='int')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ param_id = module.params.get('id')
+ name = module.params.get('name')
+ instance = module.params.get('instance')
+ volume_size = module.params.get('volume_size')
+ device_name = module.params.get('device_name')
+ zone = module.params.get('zone')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+
+ if state == 'list':
+ module.deprecate(
+ 'Using the "list" state has been deprecated. Please use the ec2_vol_info module instead', date='2022-06-01', collection_name='amazon.aws')
+
+ # Ensure we have the zone or can get the zone
+ if instance is None and zone is None and state == 'present':
+ module.fail_json(msg="You must specify either instance or zone")
+
+ # Set volume detach flag
+ if instance == 'None' or instance == '':
+ instance = None
+ detach_vol_flag = True
+ else:
+ detach_vol_flag = False
+
+ # Set changed flag
+ changed = False
+
+ ec2_conn = module.client('ec2', AWSRetry.jittered_backoff())
+
+ if state == 'list':
+ returned_volumes = []
+ vols = get_volumes(module, ec2_conn)
+
+ for v in vols:
+ returned_volumes.append(get_volume_info(v))
+
+ module.exit_json(changed=False, volumes=returned_volumes)
+
+ # Here we need to get the zone info for the instance. This covers situation where
+ # instance is specified but zone isn't.
+ # Useful for playbooks chaining instance launch with volume create + attach and where the
+ # zone doesn't matter to the user.
+ inst = None
+
+ # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
+ # without needing to pass an unused volume_size
+ if not volume_size and not (param_id or name or snapshot):
+ module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
+
+ # Try getting volume
+ volume = get_volume(module, ec2_conn, fail_on_not_found=False)
+ if state == 'present':
+ if instance:
+ inst = get_instance(module, ec2_conn, instance_id=instance)
+ zone = inst['placement']['availability_zone']
+
+            # Use the platform attribute (only set for Windows instances) to
+            # choose a sensible default device name
+            if device_name is None:
+                if inst.get('platform', '') == 'Windows':
+ device_name = '/dev/xvdf'
+ else:
+ device_name = '/dev/sdf'
+
+ # Check if there is a volume already mounted there.
+ mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name)
+ if mapped_device:
+ other_volume_mapped = False
+
+ if volume:
+ if volume['volume_id'] != mapped_device['ebs']['volume_id']:
+ other_volume_mapped = True
+ else:
+ # No volume found so this is another volume
+ other_volume_mapped = True
+
+ if other_volume_mapped:
+ module.exit_json(
+ msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance),
+ volume_id=mapped_device['ebs']['volume_id'],
+ found_volume=volume,
+ device=device_name,
+ changed=False
+ )
+
+ attach_state_changed = False
+
+ if volume:
+ volume, changed = update_volume(module, ec2_conn, volume)
+ else:
+ volume, changed = create_volume(module, ec2_conn, zone=zone)
+
+        if name:
+            tags['Name'] = name
+ final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, False)
+
+ if detach_vol_flag:
+ volume, changed = detach_volume(module, ec2_conn, volume_dict=volume)
+ elif inst is not None:
+ volume, changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name)
+
+ # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
+ volume_info = get_volume_info(volume, tags=final_tags)
+
+ module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
+ volume_id=volume_info['id'], volume_type=volume_info['type'])
+ elif state == 'absent':
+ if not name and not param_id:
+            module.fail_json(msg='A volume name or id is required for deletion')
+ if volume:
+ detach_volume(module, ec2_conn, volume_dict=volume)
+ changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py
new file mode 100644
index 00000000..fb6a6587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_info
+version_added: 1.0.0
+short_description: Gather information about ec2 volumes in AWS
+description:
+ - Gather information about ec2 volumes in AWS.
+ - This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ type: dict
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all volumes
+- amazon.aws.ec2_vol_info:
+
+# Gather information about a particular volume using volume ID
+- amazon.aws.ec2_vol_info:
+ filters:
+ volume-id: vol-00112233
+
+# Gather information about any volume with a tag key Name and value Example
+- amazon.aws.ec2_vol_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any volume that is attached
+- amazon.aws.ec2_vol_info:
+ filters:
+ attachment.status: attached
+
+'''
+
+RETURN = '''
+volumes:
+ description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ attachment_set:
+ description: Information about the volume attachments.
+ type: dict
+ sample: {
+ "attach_time": "2015-10-23T00:22:29.000Z",
+ "deleteOnTermination": "false",
+ "device": "/dev/sdf",
+ "instance_id": "i-8356263c",
+ "status": "attached"
+ }
+ create_time:
+ description: The time stamp when volume creation was initiated.
+ type: str
+ sample: "2015-10-21T14:36:08.870Z"
+ encrypted:
+ description: Indicates whether the volume is encrypted.
+ type: bool
+ sample: False
+ id:
+ description: The ID of the volume.
+ type: str
+ sample: "vol-35b333d9"
+ iops:
+ description: The number of I/O operations per second (IOPS) that the volume supports.
+ type: int
+ sample: null
+ size:
+ description: The size of the volume, in GiBs.
+ type: int
+ sample: 1
+ snapshot_id:
+ description: The snapshot from which the volume was created, if applicable.
+ type: str
+ sample: ""
+ status:
+ description: The volume state.
+ type: str
+ sample: "in-use"
+ tags:
+ description: Any tags assigned to the volume.
+ type: dict
+ sample: {
+ env: "dev"
+ }
+ type:
+ description: The volume type. This can be gp2, io1, st1, sc1, or standard.
+ type: str
+ sample: "standard"
+ zone:
+ description: The Availability Zone of the volume.
+ type: str
+ sample: "us-east-1b"
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_volume_info(volume, region):
+
+ attachment = volume["attachments"]
+
+ volume_info = {
+ 'create_time': volume["create_time"],
+ 'id': volume["volume_id"],
+ 'encrypted': volume["encrypted"],
+ 'iops': volume["iops"] if "iops" in volume else None,
+ 'size': volume["size"],
+ 'snapshot_id': volume["snapshot_id"],
+ 'status': volume["state"],
+ 'type': volume["volume_type"],
+ 'zone': volume["availability_zone"],
+ 'region': region,
+ 'attachment_set': {
+ 'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None,
+ 'device': attachment[0]["device"] if len(attachment) > 0 else None,
+ 'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None,
+ 'status': attachment[0]["state"] if len(attachment) > 0 else None,
+ 'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None
+ },
+ 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None
+ }
+
+ return volume_info
+
+
+@AWSRetry.jittered_backoff()
+def describe_volumes_with_backoff(connection, filters):
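+    # build_full_result() walks every page returned by the paginator and
+    # merges them into a single response dict, so callers always see one
+    # complete 'Volumes' list.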
+ paginator = connection.get_paginator('describe_volumes')
+ return paginator.paginate(Filters=filters).build_full_result()
+
+
+def list_ec2_volumes(connection, module):
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
+ sanitized_filters = module.params.get("filters")
+ for key in list(sanitized_filters):
+ if not key.startswith("tag:"):
+ sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
+ volume_dict_array = []
+
+ try:
+ all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe volumes.")
+
+ for volume in all_volumes["Volumes"]:
+ volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
+ volume_dict_array.append(get_volume_info(volume, module.region))
+ module.exit_json(volumes=volume_dict_array)
+
+
+def main():
+ argument_spec = dict(filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vol_facts':
+ module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2')
+
+ list_ec2_volumes(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
new file mode 100644
index 00000000..fb6a6587
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_info
+version_added: 1.0.0
+short_description: Gather information about ec2 volumes in AWS
+description:
+ - Gather information about ec2 volumes in AWS.
+ - This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ type: dict
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all volumes
+- amazon.aws.ec2_vol_info:
+
+# Gather information about a particular volume using volume ID
+- amazon.aws.ec2_vol_info:
+ filters:
+ volume-id: vol-00112233
+
+# Gather information about any volume with a tag key Name and value Example
+- amazon.aws.ec2_vol_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any volume that is attached
+- amazon.aws.ec2_vol_info:
+ filters:
+ attachment.status: attached
+
+'''
+
+RETURN = '''
+volumes:
+ description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume.
+ type: list
+ elements: dict
+ returned: always
+ contains:
+ attachment_set:
+ description: Information about the volume attachments.
+ type: dict
+ sample: {
+ "attach_time": "2015-10-23T00:22:29.000Z",
+ "deleteOnTermination": "false",
+ "device": "/dev/sdf",
+ "instance_id": "i-8356263c",
+ "status": "attached"
+ }
+ create_time:
+ description: The time stamp when volume creation was initiated.
+ type: str
+ sample: "2015-10-21T14:36:08.870Z"
+ encrypted:
+ description: Indicates whether the volume is encrypted.
+ type: bool
+ sample: False
+ id:
+ description: The ID of the volume.
+ type: str
+ sample: "vol-35b333d9"
+ iops:
+ description: The number of I/O operations per second (IOPS) that the volume supports.
+ type: int
+ sample: null
+ size:
+ description: The size of the volume, in GiBs.
+ type: int
+ sample: 1
+ snapshot_id:
+ description: The snapshot from which the volume was created, if applicable.
+ type: str
+ sample: ""
+ status:
+ description: The volume state.
+ type: str
+ sample: "in-use"
+ tags:
+ description: Any tags assigned to the volume.
+ type: dict
+ sample: {
+ env: "dev"
+ }
+ type:
+ description: The volume type. This can be gp2, io1, st1, sc1, or standard.
+ type: str
+ sample: "standard"
+ zone:
+ description: The Availability Zone of the volume.
+ type: str
+ sample: "us-east-1b"
+'''
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_volume_info(volume, region):
+
+ attachment = volume["attachments"]
+
+ volume_info = {
+ 'create_time': volume["create_time"],
+ 'id': volume["volume_id"],
+ 'encrypted': volume["encrypted"],
+ 'iops': volume["iops"] if "iops" in volume else None,
+ 'size': volume["size"],
+ 'snapshot_id': volume["snapshot_id"],
+ 'status': volume["state"],
+ 'type': volume["volume_type"],
+ 'zone': volume["availability_zone"],
+ 'region': region,
+ 'attachment_set': {
+ 'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None,
+ 'device': attachment[0]["device"] if len(attachment) > 0 else None,
+ 'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None,
+ 'status': attachment[0]["state"] if len(attachment) > 0 else None,
+ 'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None
+ },
+ 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None
+ }
+
+ return volume_info
+
+
+@AWSRetry.jittered_backoff()
+def describe_volumes_with_backoff(connection, filters):
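+    # build_full_result() walks every page returned by the paginator and
+    # merges them into a single response dict, so callers always see one
+    # complete 'Volumes' list.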
+ paginator = connection.get_paginator('describe_volumes')
+ return paginator.paginate(Filters=filters).build_full_result()
+
+
+def list_ec2_volumes(connection, module):
+
+ # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
+ sanitized_filters = module.params.get("filters")
+ for key in list(sanitized_filters):
+ if not key.startswith("tag:"):
+ sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
+ volume_dict_array = []
+
+ try:
+ all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
+ except ClientError as e:
+ module.fail_json_aws(e, msg="Failed to describe volumes.")
+
+ for volume in all_volumes["Volumes"]:
+ volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
+ volume_dict_array.append(get_volume_info(volume, module.region))
+ module.exit_json(volumes=volume_dict_array)
+
+
+def main():
+ argument_spec = dict(filters=dict(default={}, type='dict'))
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vol_facts':
+ module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2')
+
+ list_ec2_volumes(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py
new file mode 100644
index 00000000..5cbb8e6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option
+version_added: 1.0.0
+short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
+ requested
+description:
+  - This module creates or removes DHCP option sets, and can associate them with a VPC.
+    Optionally, a new DHCP options set can be created that merges a VPC's existing
+    DHCP option set with the values provided.
+    When dhcp_options_id is provided, the module will
+    1. remove it (with state='absent'),
+    2. ensure tags are applied (if state='present' and tags are provided), or
+    3. attach it to a VPC (if state='present' and a vpc_id is provided).
+    Any optional values that are not supplied are treated as a no-op,
+    i.e. they inherit what already exists for the VPC.
+    To remove existing options while inheriting the rest, supply an empty value
+    (e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
+author: "Joel Thompson (@joelthompson)"
+options:
+ domain_name:
+ description:
+ - The domain name to set in the DHCP option sets
+ type: str
+ dns_servers:
+ description:
+ - A list of hosts to set the DNS servers for the VPC to. (Should be a
+ list of IP addresses rather than host names.)
+ type: list
+ elements: str
+ ntp_servers:
+ description:
+ - List of hosts to advertise as NTP servers for the VPC.
+ type: list
+ elements: str
+ netbios_name_servers:
+ description:
+ - List of hosts to advertise as NetBIOS servers.
+ type: list
+ elements: str
+ netbios_node_type:
+ description:
+ - NetBIOS node type to advertise in the DHCP options.
+ The AWS recommendation is to use 2 (when using netbios name services)
+ U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
+ type: int
+ vpc_id:
+ description:
+ - VPC ID to associate with the requested DHCP option set.
+ If no vpc id is provided, and no matching option set is found then a new
+ DHCP option set is created.
+ type: str
+ delete_old:
+ description:
+ - Whether to delete the old VPC DHCP option set when associating a new one.
+ This is primarily useful for debugging/development purposes when you
+ want to quickly roll back to the old option set. Note that this setting
+ will be ignored, and the old DHCP option set will be preserved, if it
+ is in use by any other VPC. (Otherwise, AWS will return an error.)
+ type: bool
+ default: 'yes'
+ inherit_existing:
+ description:
+ - For any DHCP options not specified in these parameters, whether to
+ inherit them from the options set already applied to vpc_id, or to
+ reset them to be empty.
+ type: bool
+ default: 'no'
+ tags:
+ description:
+      - Tags to be applied to a DHCP options set if a new one is created, or
+        if dhcp_options_id is provided (the options must match).
+ aliases: [ 'resource_tags']
+ type: dict
+ dhcp_options_id:
+ description:
+ - The resource_id of an existing DHCP options set.
+ If this is specified, then it will override other settings, except tags
+ (which will be updated to match)
+ type: str
+ state:
+ description:
+ - create/assign or remove the DHCP options.
+ If state is set to absent, then a DHCP options set matched either
+ by id, or tags and options will be removed if possible.
+ default: present
+ choices: [ 'absent', 'present' ]
+ type: str
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+requirements:
+ - boto
+'''
+
+RETURN = """
+new_options:
+ description: The DHCP options created, associated or found
+ returned: when appropriate
+ type: dict
+ sample:
+ domain-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+    netbios-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type: 2
+ domain-name: "my.example.com"
+dhcp_options_id:
+  description: The AWS resource id of the primary DHCP options set created, found or removed
+ type: str
+ returned: when available
+changed:
+ description: Whether the dhcp options were changed
+ type: bool
+ returned: always
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- amazon.aws.ec2_vpc_dhcp_option:
+ domain_name: "foo.example.com"
+ region: us-east-1
+ dns_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ vpc_id: vpc-123456
+ delete_old: True
+ inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - "{{groups['dns-primary']}}"
+ - "{{groups['dns-secondary']}}"
+ vpc_id: vpc-123456
+ inherit_existing: True
+ delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+ state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- amazon.aws.ec2_vpc_dhcp_option:
+ region: us-east-1
+ dhcp_options_id: dopt-12345678
+ vpc_id: vpc-123456
+
+"""
+
+import collections
+from time import sleep, time
+
+try:
+ import boto.vpc
+ import boto.ec2
+ from boto.exception import EC2ResponseError
+except ImportError:
+ pass # Taken care of by ec2.HAS_BOTO
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import HAS_BOTO
+from ..module_utils.ec2 import connect_to_aws
+from ..module_utils.ec2 import get_aws_connection_info
+
+
+def get_resource_tags(vpc_conn, resource_id):
+ return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+
+def retry_not_found(to_call, *args, **kwargs):
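+    # Freshly created DHCP option sets can take a moment to become visible,
+    # so NotFound errors are retried for up to five minutes.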
+ start_time = time()
+ while time() < start_time + 300:
+ try:
+ return to_call(*args, **kwargs)
+ except EC2ResponseError as e:
+ if e.error_code in ['InvalidDhcpOptionID.NotFound', 'InvalidDhcpOptionsID.NotFound']:
+ sleep(3)
+ continue
+ raise e
+
+
+def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
+ try:
+ cur_tags = get_resource_tags(vpc_conn, resource_id)
+ if tags == cur_tags:
+ return {'changed': False, 'tags': cur_tags}
+
+ to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+ if to_delete and not add_only:
+ retry_not_found(vpc_conn.delete_tags, resource_id, to_delete, dry_run=check_mode)
+
+ to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
+ if to_add:
+ retry_not_found(vpc_conn.create_tags, resource_id, to_add, dry_run=check_mode)
+
+ latest_tags = get_resource_tags(vpc_conn, resource_id)
+ return {'changed': True, 'tags': latest_tags}
+ except EC2ResponseError as e:
+ module.fail_json_aws(e, msg='Failed to modify tags')
+
+
+def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
+ """
+ Returns the DHCP options object currently associated with the requested VPC ID using the VPC
+ connection variable.
+ """
+ vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
+ if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default":
+ return None
+ dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
+ if len(dhcp_options) != 1:
+ return None
+ return dhcp_options[0]
+
+
+def match_dhcp_options(vpc_conn, tags=None, options=None):
+ """
+ Finds a DHCP Options object that optionally matches the tags and options provided
+ """
+ dhcp_options = vpc_conn.get_all_dhcp_options()
+ for dopts in dhcp_options:
+ if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags:
+ if (not options) or dopts.options == options:
+                return (True, dopts)
+    return (False, None)
+
+
+def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
+ associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
+ if len(associations) > 0:
+ return False
+ else:
+ vpc_conn.delete_dhcp_options(dhcp_options_id)
+ return True
+
+
+def main():
+ argument_spec = dict(
+ dhcp_options_id=dict(type='str', default=None),
+ domain_name=dict(type='str', default=None),
+ dns_servers=dict(type='list', elements='str', default=None),
+ ntp_servers=dict(type='list', elements='str', default=None),
+ netbios_name_servers=dict(type='list', elements='str', default=None),
+ netbios_node_type=dict(type='int', default=None),
+ vpc_id=dict(type='str', default=None),
+ delete_old=dict(type='bool', default=True),
+ inherit_existing=dict(type='bool', default=False),
+ tags=dict(type='dict', default=None, aliases=['resource_tags']),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ check_boto3=False,
+ supports_check_mode=True
+ )
+
+ params = module.params
+ found = False
+ changed = False
+ new_options = collections.defaultdict(lambda: None)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required for this module')
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ connection = connect_to_aws(boto.vpc, region, **boto_params)
+
+ existing_options = None
+
+ # First check if we were given a dhcp_options_id
+ if not params['dhcp_options_id']:
+ # No, so create new_options from the parameters
+ if params['dns_servers'] is not None:
+ new_options['domain-name-servers'] = params['dns_servers']
+ if params['netbios_name_servers'] is not None:
+ new_options['netbios-name-servers'] = params['netbios_name_servers']
+ if params['ntp_servers'] is not None:
+ new_options['ntp-servers'] = params['ntp_servers']
+ if params['domain_name'] is not None:
+ # needs to be a list for comparison with boto objects later
+ new_options['domain-name'] = [params['domain_name']]
+ if params['netbios_node_type'] is not None:
+ # needs to be a list for comparison with boto objects later
+ new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
+ # If we were given a vpc_id then we need to look at the options on that
+ if params['vpc_id']:
+ existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
+ # if we've been asked to inherit existing options, do that now
+ if params['inherit_existing']:
+ if existing_options:
+ for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
+ if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
+ new_options[option] = existing_options.options.get(option)
+
+ # Do the vpc's dhcp options already match what we're asked for? if so we are done
+ if existing_options and new_options == existing_options.options:
+ module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)
+
+ # If no vpc_id was given, or the options don't match then look for an existing set using tags
+ found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)
+
+ # Now let's cover the case where there are existing options that we were told about by id
+ # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
+ else:
+ supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
+ if len(supplied_options) != 1:
+ if params['state'] != 'absent':
+ module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
+ else:
+ found = True
+ dhcp_option = supplied_options[0]
+ if params['state'] != 'absent' and params['tags']:
+ ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
+
+ # Now we have the dhcp options set, let's do the necessary
+
+ # if we found options we were asked to remove then try to do so
+ if params['state'] == 'absent':
+ if not module.check_mode:
+ if found:
+ changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
+ module.exit_json(changed=changed, new_options={})
+
+ # otherwise if we haven't found the required options we have something to do
+ elif not module.check_mode and not found:
+
+ # create some dhcp options if we weren't able to use existing ones
+ if not found:
+ # Convert netbios-node-type and domain-name back to strings
+ if new_options['netbios-node-type']:
+ new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
+ if new_options['domain-name']:
+ new_options['domain-name'] = new_options['domain-name'][0]
+
+ # create the new dhcp options set requested
+ dhcp_option = connection.create_dhcp_options(
+ new_options['domain-name'],
+ new_options['domain-name-servers'],
+ new_options['ntp-servers'],
+ new_options['netbios-name-servers'],
+ new_options['netbios-node-type'])
+
+ # wait for dhcp option to be accessible
+ found_dhcp_opt = False
+ try:
+ found_dhcp_opt = retry_not_found(connection.get_all_dhcp_options, dhcp_options_ids=[dhcp_option.id])
+ except EC2ResponseError as e:
+ module.fail_json_aws(e, msg="Failed to describe DHCP options")
+ if not found_dhcp_opt:
+ module.fail_json(msg="Failed to wait for {0} to be available.".format(dhcp_option.id))
+
+ changed = True
+ if params['tags']:
+ ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
+
+ # If we were given a vpc_id, then attach the options we now have to that before we finish
+ if params['vpc_id'] and not module.check_mode:
+ changed = True
+ connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
+ # and remove old ones if that was requested
+ if params['delete_old'] and existing_options:
+ remove_dhcp_options_by_id(connection, existing_options.id)
+
+ module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py
new file mode 100644
index 00000000..f82f8b3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_facts.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about dhcp options sets in AWS
+description:
+ - Gather information about dhcp options sets in AWS.
+ - This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ type: dict
+ dhcp_options_ids:
+ description:
+ - Get details of specific DHCP Option IDs.
+ aliases: ['DhcpOptionIds']
+ type: list
+ elements: str
+ dry_run:
+ description:
+ - Checks whether you have the required permissions to view the DHCP
+ Options.
+ aliases: ['DryRun']
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all DHCP Option sets for an account or profile
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by id
+  amazon.aws.ec2_vpc_dhcp_option_info:
+    region: ap-southeast-2
+    profile: production
+    dhcp_options_ids:
+      - dopt-123fece2
+ register: dhcp_info
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The dhcp option sets for the account
+ returned: always
+ type: list
+
+changed:
+  description: Always false, as listing the dhcp options does not change anything
+ type: bool
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_dhcp_options_info(dhcp_option):
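+    # Normalise a missing Tags list to an empty Name tag so callers can
+    # always rely on a 'Name' key being present in the result.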
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+ if module.params.get("dry_run"):
+ params['DryRun'] = True
+
+ if module.params.get("dhcp_options_ids"):
+ params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
+ for option in all_dhcp_options['DhcpOptions']]
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default={}),
+ dry_run=dict(type='bool', default=False, aliases=['DryRun']),
+ dhcp_options_ids=dict(type='list', elements='str', aliases=['DhcpOptionIds'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_vpc_dhcp_option_facts':
+ module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'",
+ date='2021-12-01', collection_name='amazon.aws')
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Describe the DHCP option sets and return them in snake_case
+ results = list_dhcp_options(client, module)
+
+ module.exit_json(dhcp_options=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
new file mode 100644
index 00000000..f82f8b3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about DHCP option sets in AWS
+description:
+ - Gather information about DHCP option sets in AWS.
+ - This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ type: dict
+ dhcp_options_ids:
+ description:
+ - Get details of specific DHCP Option IDs.
+ aliases: ['DhcpOptionIds']
+ type: list
+ elements: str
+ dry_run:
+ description:
+ - Checks whether you have the required permissions to view the DHCP
+ Options.
+ aliases: ['DryRun']
+ type: bool
+ default: false
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all DHCP Option sets for an account or profile
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by DhcpOptionId
+ amazon.aws.ec2_vpc_dhcp_option_info:
+ region: ap-southeast-2
+ profile: production
+ dhcp_options_ids: dopt-123fece2
+ register: dhcp_info
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The DHCP option sets for the account.
+ returned: always
+ type: list
+
+changed:
+ description: Whether the module made any changes; always false because the module only gathers information.
+ type: bool
+ returned: always
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def get_dhcp_options_info(dhcp_option):
+ dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+ 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+ 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+ return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+ params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+ if module.params.get("dry_run"):
+ params['DryRun'] = True
+
+ if module.params.get("dhcp_options_ids"):
+ params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+ try:
+ all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e)
+
+ return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
+ for option in all_dhcp_options['DhcpOptions']]
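+# camel_dict_to_snake_dict() (from ansible.module_utils.common.dict_transformations)
+# recursively converts the CamelCase keys of the boto3 response into snake_case
+# Ansible return values, e.g.:
+#
+#   camel_dict_to_snake_dict({'DhcpOptionsId': 'dopt-123', 'DhcpConfigurations': []})
+#   # -> {'dhcp_options_id': 'dopt-123', 'dhcp_configurations': []}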
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(type='dict', default={}),
+ dry_run=dict(type='bool', default=False, aliases=['DryRun']),
+ dhcp_options_ids=dict(type='list', elements='str', aliases=['DhcpOptionIds'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_vpc_dhcp_option_facts':
+ module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'",
+ date='2021-12-01', collection_name='amazon.aws')
+
+ client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Describe the DHCP option sets and return them in snake_case
+ results = list_dhcp_options(client, module)
+
+ module.exit_json(dhcp_options=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 00000000..0d912031
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+version_added: 1.0.0
+short_description: Configure AWS virtual private clouds
+description:
+ - Create, modify, and terminate AWS virtual private clouds.
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+ required: yes
+ type: str
+ cidr_block:
+ description:
+ - The primary CIDR of the VPC. Since Ansible 2.5 a list of CIDRs can be provided. The first entry in the list will be used as the primary CIDR
+ and is used in conjunction with C(name) to ensure idempotence.
+ required: yes
+ type: list
+ elements: str
+ ipv6_cidr:
+ description:
+ - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
+ or the size of the CIDR block.
+ default: False
+ type: bool
+ purge_cidrs:
+ description:
+ - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
+ default: no
+ type: bool
+ tenancy:
+ description:
+ - Whether the VPC tenancy is default or dedicated. This cannot be changed after the VPC has been created.
+ default: default
+ choices: [ 'default', 'dedicated' ]
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: yes
+ type: bool
+ dns_hostnames:
+ description:
+ - Whether to enable AWS hostname support.
+ default: yes
+ type: bool
+ dhcp_opts_id:
+ description:
+ - The id of the DHCP options to use for this VPC.
+ type: str
+ tags:
+ description:
+ - The tags you want attached to the VPC. This is independent of the I(name) value; note that if you pass a C(Name) key it will override the Name of
+ the VPC if it's different.
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - The state of the VPC. Either absent or present.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ multi_ok:
+ description:
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+ duplicate VPCs created.
+ type: bool
+ default: false
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ region: us-east-1
+ tags:
+ module: ec2_vpc_net
+ this: works
+ tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+ amazon.aws.ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ ipv6_cidr: True
+ region: us-east-1
+ tenancy: dedicated
+'''
+
+RETURN = '''
+vpc:
+ description: info about the VPC that was created or deleted
+ returned: always
+ type: complex
+ contains:
+ cidr_block:
+ description: The CIDR of the VPC
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ cidr_block_association_set:
+ description: IPv4 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "cidr_block": "10.0.0.0/24",
+ "cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ classic_link_enabled:
+ description: indicates whether ClassicLink is enabled
+ returned: always
+ type: bool
+ sample: false
+ dhcp_options_id:
+ description: the id of the DHCP options associated with this VPC
+ returned: always
+ type: str
+ sample: dopt-12345678
+ id:
+ description: VPC resource id
+ returned: always
+ type: str
+ sample: vpc-12345678
+ instance_tenancy:
+ description: indicates whether VPC uses default or dedicated tenancy
+ returned: always
+ type: str
+ sample: default
+ ipv6_cidr_block_association_set:
+ description: IPv6 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "ipv6_cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "ipv6_cidr_block": "2001:db8::/56",
+ "ipv6_cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ is_default:
+ description: indicates whether this is the default VPC
+ returned: always
+ type: bool
+ sample: false
+ state:
+ description: state of the VPC
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the VPC, includes name
+ returned: always
+ type: complex
+ contains:
+ Name:
+ description: name tag for the VPC
+ returned: always
+ type: str
+ sample: pk_vpc4
+ owner_id:
+ description: The AWS account which owns the VPC.
+ returned: always
+ type: str
+ sample: 123456789012
+'''
+
+from time import sleep
+from time import time
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.network import to_subnet
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_message
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+ """Returns None or a vpc object depending on the existence of a VPC. When supplied
+ with a CIDR, it will check for matching tags to determine if it is a match
+ otherwise it will assume the VPC does not exist and thus return None.
+ """
+ try:
+ vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block})
+ matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+ # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block)
+ if not matching_vpcs:
+ vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]})
+ matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ if multi:
+ return None
+ elif len(matching_vpcs) == 1:
+ return matching_vpcs[0]['VpcId']
+ elif len(matching_vpcs) > 1:
+ module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+ 'CIDR block you specified. If you would like to create '
+ 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
+ return None
+
+
+@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
+def get_classic_link_with_backoff(connection, vpc_id):
+ try:
+ return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
+ except is_boto3_error_message('The functionality you requested is not available in this region.'):
+ return False
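+# is_boto3_error_message() returns an exception type that only matches
+# botocore ClientError responses whose error message contains the given text,
+# which is why it can be used directly in the except clause above; everything
+# else propagates to the caller unchanged.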
+
+
+def get_vpc(module, connection, vpc_id):
+ # wait for vpc to be available
+ try:
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
+
+ try:
+ vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+ try:
+ vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ return vpc_obj
+
+
+def update_vpc_tags(connection, module, vpc_id, tags, name):
+ if tags is None:
+ tags = dict()
+
+ tags.update({'Name': name})
+ tags = dict((k, to_native(v)) for k, v in tags.items())
+ try:
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': vpc_id})
+ current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=filters, aws_retry=True)['Tags'])
+ tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
+ if tags_to_update:
+ if not module.check_mode:
+ tags = ansible_dict_to_boto3_tag_list(tags_to_update)
+ connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
+
+ # Wait for tags to be updated
+ expected_tags = boto3_tag_list_to_ansible_dict(tags)
+ filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
+
+ return True
+ else:
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+
+def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ if vpc_obj['DhcpOptionsId'] != dhcp_id:
+ if not module.check_mode:
+ try:
+ connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+ try:
+ # Wait for DhcpOptionsId to be updated
+ filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to wait for DhcpOptionsId to be updated")
+
+ return True
+ else:
+ return False
+
+
+def create_vpc(connection, module, cidr_block, tenancy):
+ try:
+ if not module.check_mode:
+ vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy, aws_retry=True)
+ else:
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to create the VPC")
+
+ # wait up to 30 seconds for vpc to exist
+ try:
+ connection.get_waiter('vpc_exists').wait(
+ VpcIds=[vpc_obj['Vpc']['VpcId']],
+ WaiterConfig=dict(MaxAttempts=30)
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
+
+ return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+ start_time = time()
+ updated = False
+ while time() < start_time + 300:
+ current_value = connection.describe_vpc_attribute(
+ Attribute=attribute,
+ VpcId=vpc_id,
+ aws_retry=True
+ )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+ if current_value != expected_value:
+ sleep(3)
+ else:
+ updated = True
+ break
+ if not updated:
+ module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def get_cidr_network_bits(module, cidr_block):
+ fixed_cidrs = []
+ for cidr in cidr_block:
+ split_addr = cidr.split('/')
+ if len(split_addr) == 2:
+ # this_ip is a IPv4 CIDR that may or may not have host bits set
+ # Get the network bits.
+ valid_cidr = to_subnet(split_addr[0], split_addr[1])
+ if cidr != valid_cidr:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+ fixed_cidrs.append(valid_cidr)
+ else:
+ # let AWS handle invalid CIDRs
+ fixed_cidrs.append(cidr)
+ return fixed_cidrs
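+# to_subnet() (from ansible.module_utils.common.network) masks off the host
+# bits of the address, which is what the warning above relies on, e.g.:
+#
+#   to_subnet('10.1.2.3', 24)
+#   # -> '10.1.2.0/24'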
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ cidr_block=dict(type='list', required=True, elements='str'),
+ ipv6_cidr=dict(type='bool', default=False),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ dhcp_opts_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False),
+ purge_cidrs=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params.get('name')
+ cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+ ipv6_cidr = module.params.get('ipv6_cidr')
+ purge_cidrs = module.params.get('purge_cidrs')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
+
+ changed = False
+
+ connection = module.client(
+ 'ec2',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+ )
+ )
+
+ if dns_hostnames and not dns_support:
+ module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+ if state == 'present':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is None:
+ vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
+ changed = True
+
+ vpc_obj = get_vpc(module, connection, vpc_id)
+
+ associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+ if cidr['CidrBlockState']['State'] != 'disassociated')
+ to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
+ to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
+ expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
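+ # Worked example of the reconciliation above (illustrative): with
+ # associated_cidrs = {'10.0.0.0/16': 'assoc-1', '10.1.0.0/16': 'assoc-2'}
+ # and cidr_block = ['10.0.0.0/16', '10.2.0.0/16'], to_add is
+ # ['10.2.0.0/16'], to_remove is ['assoc-2'] (acted on only when
+ # purge_cidrs is true) and expected_cidrs is
+ # ['10.0.0.0/16', '10.2.0.0/16'].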
+
+ if len(cidr_block) > 1:
+ for cidr in to_add:
+ changed = True
+ try:
+ connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+ if ipv6_cidr:
+ if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+ module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+ vpc_id,
+ vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+ else:
+ try:
+ connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+
+ if purge_cidrs:
+ for association_id in to_remove:
+ changed = True
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+ "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+ if dhcp_id is not None:
+ try:
+ if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update DHCP options")
+
+ if tags is not None or name is not None:
+ try:
+ if update_vpc_tags(connection, module, vpc_id, tags, name):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+ current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+ current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+ if current_dns_enabled != dns_support:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+ if current_dns_hostnames != dns_hostnames:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+
+ # wait for associated cidrs to match
+ if to_add or to_remove:
+ try:
+ connection.get_waiter('vpc_available').wait(
+ VpcIds=[vpc_id],
+ Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to wait for CIDRs to update", vpc_id=vpc_id)
+
+ # try to wait for enableDnsSupport and enableDnsHostnames to match
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+ final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
+ final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
+ final_state['id'] = final_state.pop('vpc_id')
+ debugging = dict(to_add=to_add, to_remove=to_remove, expected_cidrs=expected_cidrs)
+
+ module.exit_json(changed=changed, vpc=final_state, debugging=debugging)
+
+ elif state == 'absent':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_vpc(VpcId=vpc_id, aws_retry=True)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+ "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
+
+ module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py
new file mode 100644
index 00000000..62a9b1ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_facts.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPCs in AWS
+description:
+ - Gather information about EC2 VPCs in AWS.
+ - This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements:
+ - boto3
+ - botocore
+options:
+ vpc_ids:
+ description:
+ - A list of VPC IDs that exist in your account.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPCs
+- amazon.aws.ec2_vpc_net_info:
+
+# Gather information about a particular VPC using VPC ID
+- amazon.aws.ec2_vpc_net_info:
+ vpc_ids: vpc-00112233
+
+# Gather information about any VPC with a tag key Name and value Example
+- amazon.aws.ec2_vpc_net_info:
+ filters:
+ "tag:Name": Example
+
+'''
+
+RETURN = '''
+vpcs:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The ID of the VPC (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the VPC.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the VPC.
+ returned: always
+ type: dict
+ instance_tenancy:
+ description: The instance tenancy setting for the VPC.
+ returned: always
+ type: str
+ is_default:
+ description: True if this is the default VPC for the account.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the VPC.
+ returned: always
+ type: str
+ classic_link_dns_supported:
+ description: True/False depending on attribute setting for classic link DNS support.
+ returned: always
+ type: bool
+ classic_link_enabled:
+ description: True/False depending on if classic link support is enabled.
+ returned: always
+ type: bool
+ enable_dns_hostnames:
+ description: True/False depending on attribute setting for DNS hostnames support.
+ returned: always
+ type: bool
+ enable_dns_support:
+ description: True/False depending on attribute setting for DNS support.
+ returned: always
+ type: bool
+ cidr_block_association_set:
+ description: An array of IPv4 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ cidr_block:
+ description: The IPv4 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ owner_id:
+ description: The AWS account which owns the VPC.
+ returned: always
+ type: str
+ sample: 123456789012
+ dhcp_options_id:
+ description: The ID of the DHCP options associated with this VPC.
+ returned: always
+ type: str
+ sample: dopt-12345678
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def describe_vpcs(connection, module):
+ """
+ Describe VPCs.
+
+ connection : boto3 client connection object
+ module : AnsibleAWSModule object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ vpc_ids = module.params.get('vpc_ids')
+
+ # init empty list for return vars
+ vpc_info = list()
+ vpc_list = list()
+
+ # Get the basic VPC info
+ try:
+ response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids))
+
+ # Loop through results and create a list of VPC IDs
+ for vpc in response['Vpcs']:
+ vpc_list.append(vpc['VpcId'])
+
+ # We can get these results in bulk, but it still needs two separate calls to the API
+ try:
+ cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list, aws_retry=True)
+ except is_boto3_error_code('UnsupportedOperation'):
+ cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Unable to describe if ClassicLink is enabled')
+
+ try:
+ cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list, aws_retry=True)
+ except is_boto3_error_code('UnsupportedOperation'):
+ cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Unable to describe if ClassicLinkDns is supported')
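+ # is_boto3_error_code() returns an exception type that only matches
+ # ClientError responses with the given error code, so the
+ # 'UnsupportedOperation' branches above catch regions/accounts without
+ # ClassicLink support and synthesise a "disabled/unsupported" answer
+ # for every VPC instead of failing.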
+
+ # Loop through the results and add the other VPC attributes we gathered
+ for vpc in response['Vpcs']:
+ error_message = "Unable to describe VPC attribute {0}"
+ # We have to make two separate calls per VPC to get these attributes.
+ try:
+ dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'],
+ Attribute='enableDnsSupport', aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=error_message.format('enableDnsSupport'))
+ try:
+ dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'],
+ Attribute='enableDnsHostnames', aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames'))
+
+ # loop through the ClassicLink Enabled results and add the value for the correct VPC
+ for item in cl_enabled['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled']
+
+ # loop through the ClassicLink DNS support results and add the value for the correct VPC
+ for item in cl_dns_support['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported']
+
+ # add the two DNS attributes
+ vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
+ vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
+ # for backwards compatibility
+ vpc['id'] = vpc['VpcId']
+ vpc_info.append(camel_dict_to_snake_dict(vpc))
+ # convert tag list to ansible dict
+ vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
+
+ module.exit_json(vpcs=vpc_info)
+
+
+def main():
+ argument_spec = dict(
+ vpc_ids=dict(type='list', elements='str', default=[]),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_net_facts':
+ module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ describe_vpcs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py
new file mode 100644
index 00000000..62a9b1ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPCs in AWS
+description:
+ - Gather information about EC2 VPCs in AWS.
+ - This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements:
+ - boto3
+ - botocore
+options:
+ vpc_ids:
+ description:
+ - A list of VPC IDs that exist in your account.
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPCs
+- amazon.aws.ec2_vpc_net_info:
+
+# Gather information about a particular VPC using VPC ID
+- amazon.aws.ec2_vpc_net_info:
+ vpc_ids: vpc-00112233
+
+# Gather information about any VPC with a tag key Name and value Example
+- amazon.aws.ec2_vpc_net_info:
+ filters:
+ "tag:Name": Example
+
+'''
+
+RETURN = '''
+vpcs:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The ID of the VPC (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+ description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the VPC.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the VPC.
+ returned: always
+ type: dict
+ instance_tenancy:
+ description: The instance tenancy setting for the VPC.
+ returned: always
+ type: str
+ is_default:
+ description: True if this is the default VPC for the account.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the VPC.
+ returned: always
+ type: str
+ classic_link_dns_supported:
+ description: True/False depending on attribute setting for classic link DNS support.
+ returned: always
+ type: bool
+ classic_link_enabled:
+ description: True/False depending on if classic link support is enabled.
+ returned: always
+ type: bool
+ enable_dns_hostnames:
+ description: True/False depending on attribute setting for DNS hostnames support.
+ returned: always
+ type: bool
+ enable_dns_support:
+ description: True/False depending on attribute setting for DNS support.
+ returned: always
+ type: bool
+ cidr_block_association_set:
+ description: An array of IPv4 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ cidr_block:
+ description: The IPv4 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the VPC.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+ owner_id:
+ description: The AWS account which owns the VPC.
+ returned: always
+ type: str
+ sample: 123456789012
+ dhcp_options_id:
+ description: The ID of the DHCP options associated with this VPC.
+ returned: always
+ type: str
+ sample: dopt-12345678
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+def describe_vpcs(connection, module):
+ """
+ Describe VPCs.
+
+ connection : boto3 client connection object
+ module : AnsibleAWSModule object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ vpc_ids = module.params.get('vpc_ids')
+
+ # init empty list for return vars
+ vpc_info = list()
+ vpc_list = list()
+
+ # Get the basic VPC info
+ try:
+ response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids))
+
+ # Loop through results and create a list of VPC IDs
+ for vpc in response['Vpcs']:
+ vpc_list.append(vpc['VpcId'])
+
+ # We can get these results in bulk, but it still needs two separate calls to the API
+ try:
+ cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list, aws_retry=True)
+ except is_boto3_error_code('UnsupportedOperation'):
+ cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Unable to describe if ClassicLink is enabled')
+
+ try:
+ cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list, aws_retry=True)
+ except is_boto3_error_code('UnsupportedOperation'):
+ cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg='Unable to describe if ClassicLinkDns is supported')
+
+ # Loop through the results and add the other VPC attributes we gathered
+ for vpc in response['Vpcs']:
+ error_message = "Unable to describe VPC attribute {0}"
+ # We have to make two separate calls per VPC to get these attributes.
+ try:
+ dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'],
+ Attribute='enableDnsSupport', aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=error_message.format('enableDnsSupport'))
+ try:
+ dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'],
+ Attribute='enableDnsHostnames', aws_retry=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames'))
+
+ # loop through the ClassicLink Enabled results and add the value for the correct VPC
+ for item in cl_enabled['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled']
+
+ # loop through the ClassicLink DNS support results and add the value for the correct VPC
+ for item in cl_dns_support['Vpcs']:
+ if vpc['VpcId'] == item['VpcId']:
+ vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported']
+
+ # add the two DNS attributes
+ vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
+ vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
+ # for backwards compatibility
+ vpc['id'] = vpc['VpcId']
+ vpc_info.append(camel_dict_to_snake_dict(vpc))
+ # convert tag list to ansible dict
+ vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
+
+ module.exit_json(vpcs=vpc_info)
+
+
+def main():
+ argument_spec = dict(
+ vpc_ids=dict(type='list', elements='str', default=[]),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._name == 'ec2_vpc_net_facts':
+ module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+ describe_vpcs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 00000000..d9b34a1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,599 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+version_added: 1.0.0
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds.
+author:
+- Robert Estelle (@erydo)
+- Brad Davidson (@brandond)
+requirements: [ boto3 ]
+options:
+ az:
+ description:
+ - "The availability zone for the subnet."
+ type: str
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
+ type: str
+ required: true
+ ipv6_cidr:
+ description:
+ - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
+ - "Required if I(assign_instances_ipv6=true)"
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - "Create or remove the subnet."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create or delete the subnet."
+ required: true
+ type: str
+ map_public:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
+ type: bool
+ default: 'no'
+ assign_instances_ipv6:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
+ type: bool
+ default: false
+ wait:
+ description:
+ - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - "Number of seconds to wait for subnet to become available I(wait=True)."
+ default: 300
+ type: int
+ purge_tags:
+ description:
+ - Whether or not to remove tags that do not appear in the I(tags) list.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ amazon.aws.ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+- name: Create subnet with IPv6 block assigned
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: 2001:db8:0:102::/64
+
+- name: Remove IPv6 block assigned to subnet
+ amazon.aws.ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: ''
+'''
+
+RETURN = '''
+subnet:
+ description: Dictionary of subnet values
+ returned: I(state=present)
+ type: complex
+ contains:
+ id:
+ description: Subnet resource id
+ returned: I(state=present)
+ type: str
+ sample: subnet-b883b2c4
+ cidr_block:
+ description: The IPv4 CIDR of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "10.0.0.0/16"
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block actively associated with the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "2001:db8:0:102::/64"
+ availability_zone:
+ description: Availability zone of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: us-east-1a
+ state:
+ description: state of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the Subnet, includes name
+ returned: I(state=present)
+ type: dict
+ sample: {"Name": "My Subnet", "env": "staging"}
+ map_public_ip_on_launch:
+ description: whether public IP is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ assign_ipv6_address_on_creation:
+ description: whether IPv6 address is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ vpc_id:
+ description: the id of the VPC where this Subnet exists
+ returned: I(state=present)
+ type: str
+ sample: vpc-67236184
+ available_ip_address_count:
+ description: number of available IPv4 addresses
+ returned: I(state=present)
+ type: str
+ sample: 251
+ default_for_az:
+ description: indicates whether this is the default Subnet for this Availability Zone
+ returned: I(state=present)
+ type: bool
+ sample: false
+ ipv6_association_id:
+ description: The IPv6 association ID for the currently associated CIDR
+ returned: I(state=present)
+ type: str
+ sample: subnet-cidr-assoc-b85c74d2
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: I(state=present)
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.waiters import get_waiter
+
+
+def get_subnet_info(subnet):
+ if 'Subnets' in subnet:
+ return [get_subnet_info(s) for s in subnet['Subnets']]
+ elif 'Subnet' in subnet:
+ subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+ else:
+ subnet = camel_dict_to_snake_dict(subnet)
+
+ if 'tags' in subnet:
+ subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+ else:
+ subnet['tags'] = dict()
+
+ if 'subnet_id' in subnet:
+ subnet['id'] = subnet['subnet_id']
+ del subnet['subnet_id']
+
+ subnet['ipv6_cidr_block'] = ''
+ subnet['ipv6_association_id'] = ''
+ ipv6set = subnet.get('ipv6_cidr_block_association_set')
+ if ipv6set:
+ for item in ipv6set:
+ if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+ subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+ subnet['ipv6_association_id'] = item['association_id']
+
+ return subnet
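+# The normalised shape returned above, for a subnet with one associated IPv6
+# block, looks roughly like this (illustrative, using the sample values from
+# the RETURN docs):
+#
+#   {'id': 'subnet-b883b2c4', 'vpc_id': 'vpc-67236184',
+#    'cidr_block': '10.1.100.0/24', 'ipv6_cidr_block': '2001:db8:0:102::/64',
+#    'ipv6_association_id': 'subnet-cidr-assoc-b85c74d2',
+#    'tags': {'Name': 'Database Subnet'}, ...}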
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(client, **params):
+ return client.describe_subnets(**params)
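+# AWSRetry.exponential_backoff() retries the decorated call with exponentially
+# growing delays when AWS answers with a throttling-style error; the wrapped
+# function keeps its signature, so callers simply do, e.g.:
+#
+#   describe_subnets_with_backoff(conn, Filters=[{'Name': 'vpc-id', 'Values': ['vpc-123456']}])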
+
+
+def waiter_params(module, params, start_time):
+ if not module.botocore_at_least("1.7.0"):
+ remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+ params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+ return params
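+# Example of the arithmetic above (illustrative): with the default
+# wait_timeout of 300 and no time spent yet, the waiter polls every 5 seconds
+# for 300 // 5 = 60 attempts; time consumed by earlier waits shrinks the
+# remaining attempt budget.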
+
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+ try:
+ get_waiter(conn, waiter_name).wait(
+ **waiter_params(module, params, start_time)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Failed to wait for updates to complete")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout']
+
+ params = dict(VpcId=vpc_id,
+ CidrBlock=cidr)
+
+ if ipv6_cidr:
+ params['Ipv6CidrBlock'] = ipv6_cidr
+
+ if az:
+ params['AvailabilityZone'] = az
+
+ try:
+ subnet = get_subnet_info(conn.create_subnet(**params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subnet")
+
+ # Sometimes AWS takes its time to create a subnet and so using the
+ # new subnet's id to do things like create tags results in an
+ # exception.
+ if wait and subnet.get('state') != 'available':
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+ try:
+ conn.get_waiter('subnet_available').wait(
+ **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
+ )
+ subnet['state'] = 'available'
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
+
+ return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+ changed = False
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
+ try:
+ cur_tags = conn.describe_tags(Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+ if to_update:
+ try:
+ if not module.check_mode:
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.create_tags)(
+ Resources=[subnet['id']],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if not module.check_mode:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if module.params['wait'] and not module.check_mode:
+ # Wait for tags to be updated
+ filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ return changed
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+ if subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+ try:
+ conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
+ .format(subnet['ipv6_association_id'], subnet['id']))
+
+ # Wait for cidr block to be disassociated
+ if module.params['wait']:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['disassociated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+ wait = module.params['wait']
+ changed = False
+
+ if subnet['ipv6_association_id'] and not ipv6_cidr:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ if ipv6_cidr:
+ filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+ 'vpc-id': subnet['vpc_id']})
+
+ try:
+ check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+ if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+ module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+ if subnet['ipv6_association_id']:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ try:
+ if not check_mode:
+ associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+ else:
+ if not check_mode and wait:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['associated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+ subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+ subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+ if subnet['ipv6_cidr_block_association_set']:
+ subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+ else:
+ subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+ return changed
+
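+# For reference, the AssociateSubnetCidrBlock response consumed above looks
+# roughly like the following (values are illustrative):
+#
+#   {'Ipv6CidrBlockAssociation': {'AssociationId': 'subnet-cidr-assoc-0123',
+#                                 'Ipv6CidrBlock': '2001:db8:0:102::/64',
+#                                 'Ipv6CidrBlockState': {'State': 'associating'}},
+#    'SubnetId': 'subnet-00112233'}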
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+ try:
+ subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+ if subnets:
+ return subnets[0]
+
+ return None
+
+
+def ensure_subnet_present(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ changed = False
+
+    # Initialize the start time so the total wait across multiple operations
+    # does not exceed the specified wait_timeout
+ start_time = time.time()
+
+ if subnet is None:
+ if not module.check_mode:
+ subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+ ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
+ changed = True
+ # Subnet will be None when check_mode is true
+ if subnet is None:
+ return {
+ 'changed': changed,
+ 'subnet': {}
+ }
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
+ if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
+ changed = True
+
+ if module.params['map_public'] != subnet['map_public_ip_on_launch']:
+ ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['tags'] != subnet['tags']:
+ stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
+ if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
+ changed = True
+
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if not module.check_mode and module.params['wait']:
+ # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
+ # so we only wait for those if necessary just before returning the subnet
+ subnet = ensure_final_subnet(conn, module, subnet, start_time)
+
+ return {
+ 'changed': changed,
+ 'subnet': subnet
+ }
+
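+# Flow summary for ensure_subnet_present: create the subnet if it is missing,
+# reconcile ipv6_cidr, map_public, assign_instances_ipv6 and tags one by one,
+# then re-read the subnet (re-waiting below if requested) so the returned
+# facts reflect the eventually-consistent attribute GETs.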
+
+def ensure_final_subnet(conn, module, subnet, start_time):
+ for rewait in range(0, 30):
+ map_public_correct = False
+ assign_ipv6_correct = False
+
+ if module.params['map_public'] == subnet['map_public_ip_on_launch']:
+ map_public_correct = True
+ else:
+ if module.params['map_public']:
+ handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
+ assign_ipv6_correct = True
+ else:
+ if module.params['assign_instances_ipv6']:
+ handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if map_public_correct and assign_ipv6_correct:
+ break
+
+ time.sleep(5)
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+
+ return subnet
+
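+# The loop above re-polls at most 30 times with a 5 second sleep, so the
+# final attribute re-check adds no more than ~150 seconds of sleeping on top
+# of the waiter timeouts.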
+
+def ensure_subnet_absent(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if subnet is None:
+ return {'changed': False}
+
+ try:
+ if not module.check_mode:
+ conn.delete_subnet(SubnetId=subnet['id'])
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
+ return {'changed': True}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete subnet")
+
+
+def main():
+ argument_spec = dict(
+ az=dict(default=None, required=False),
+ cidr=dict(required=True),
+ ipv6_cidr=dict(default='', required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id=dict(required=True),
+ map_public=dict(default=False, required=False, type='bool'),
+ assign_instances_ipv6=dict(default=False, required=False, type='bool'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300, required=False),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+ if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
+ module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
+
+ if not module.botocore_at_least("1.7.0"):
+ module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
+
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+
+ try:
+ if state == 'present':
+ result = ensure_subnet_present(connection, module)
+ elif state == 'absent':
+ result = ensure_subnet_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py
new file mode 100644
index 00000000..316d532e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_facts.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet_info
+version_added: 1.0.0
+short_description: Gather information about ec2 VPC subnets in AWS
+description:
+ - Gather information about ec2 VPC subnets in AWS
+ - This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements:
+ - boto3
+ - botocore
+options:
+ subnet_ids:
+ description:
+ - A list of subnet IDs to gather information for.
+ aliases: ['subnet_id']
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPC subnets
+- amazon.aws.ec2_vpc_subnet_info:
+
+# Gather information about a particular VPC subnet using ID
+- amazon.aws.ec2_vpc_subnet_info:
+ subnet_ids: subnet-00112233
+
+# Gather information about any VPC subnet with a tag key Name and value Example
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+
+# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
+# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
+# subnet_ids as a list.
+
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+ "tag:Name": "{{ item }}"
+ loop:
+ - publicA
+ - publicB
+ - publicC
+ register: subnet_info
+
+- set_fact:
+ subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
+'''
+
+RETURN = '''
+subnets:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ subnet_id:
+ description: The ID of the Subnet.
+ returned: always
+ type: str
+ id:
+ description: The ID of the Subnet (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+            description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the subnet.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the Subnet.
+ returned: always
+ type: dict
+ map_public_ip_on_launch:
+ description: True/False depending on attribute setting for public IP mapping.
+ returned: always
+ type: bool
+ default_for_az:
+ description: True if this is the default subnet for AZ.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the subnet.
+ returned: always
+ type: str
+ available_ip_address_count:
+ description: Count of available IPs in subnet.
+ returned: always
+ type: str
+ availability_zone:
+ description: The availability zone where the subnet exists.
+ returned: always
+ type: str
+ assign_ipv6_address_on_creation:
+ description: True/False depending on attribute setting for IPv6 address assignment.
+ returned: always
+ type: bool
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(connection, subnet_ids, filters):
+ """
+ Describe Subnets with AWSRetry backoff throttling support.
+
+ connection : boto3 client connection object
+ subnet_ids : list of subnet ids for which to gather information
+ filters : additional filters to apply to request
+ """
+ return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
+
+
+def describe_subnets(connection, module):
+ """
+ Describe Subnets.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ subnet_ids = module.params.get('subnet_ids')
+
+ if subnet_ids is None:
+ # Set subnet_ids to empty list if it is None
+ subnet_ids = []
+
+ # init empty list for return vars
+ subnet_info = list()
+
+ # Get the basic VPC info
+ try:
+ response = describe_subnets_with_backoff(connection, subnet_ids, filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe subnets')
+
+ for subnet in response['Subnets']:
+ # for backwards compatibility
+ subnet['id'] = subnet['SubnetId']
+ subnet_info.append(camel_dict_to_snake_dict(subnet))
+ # convert tag list to ansible dict
+ subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
+
+ module.exit_json(subnets=subnet_info)
+
+
+def main():
+ argument_spec = dict(
+ subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_vpc_subnet_facts':
+ module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2')
+
+ describe_subnets(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py
new file mode 100644
index 00000000..316d532e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet_info
+version_added: 1.0.0
+short_description: Gather information about ec2 VPC subnets in AWS
+description:
+ - Gather information about ec2 VPC subnets in AWS
+ - This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change.
+author: "Rob White (@wimnat)"
+requirements:
+ - boto3
+ - botocore
+options:
+ subnet_ids:
+ description:
+ - A list of subnet IDs to gather information for.
+ aliases: ['subnet_id']
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all VPC subnets
+- amazon.aws.ec2_vpc_subnet_info:
+
+# Gather information about a particular VPC subnet using ID
+- amazon.aws.ec2_vpc_subnet_info:
+ subnet_ids: subnet-00112233
+
+# Gather information about any VPC subnet with a tag key Name and value Example
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+
+# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
+# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
+# subnet_ids as a list.
+
+- amazon.aws.ec2_vpc_subnet_info:
+ filters:
+ vpc-id: vpc-abcdef00
+ "tag:Name": "{{ item }}"
+ loop:
+ - publicA
+ - publicB
+ - publicC
+ register: subnet_info
+
+- set_fact:
+ subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
+'''
+
+RETURN = '''
+subnets:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: complex
+ contains:
+ subnet_id:
+ description: The ID of the Subnet.
+ returned: always
+ type: str
+ id:
+ description: The ID of the Subnet (for backwards compatibility).
+ returned: always
+ type: str
+ vpc_id:
+            description: The ID of the VPC.
+ returned: always
+ type: str
+ state:
+ description: The state of the subnet.
+ returned: always
+ type: str
+ tags:
+ description: A dict of tags associated with the Subnet.
+ returned: always
+ type: dict
+ map_public_ip_on_launch:
+ description: True/False depending on attribute setting for public IP mapping.
+ returned: always
+ type: bool
+ default_for_az:
+ description: True if this is the default subnet for AZ.
+ returned: always
+ type: bool
+ cidr_block:
+ description: The IPv4 CIDR block assigned to the subnet.
+ returned: always
+ type: str
+ available_ip_address_count:
+ description: Count of available IPs in subnet.
+ returned: always
+ type: str
+ availability_zone:
+ description: The availability zone where the subnet exists.
+ returned: always
+ type: str
+ assign_ipv6_address_on_creation:
+ description: True/False depending on attribute setting for IPv6 address assignment.
+ returned: always
+ type: bool
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: always
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(connection, subnet_ids, filters):
+ """
+ Describe Subnets with AWSRetry backoff throttling support.
+
+ connection : boto3 client connection object
+ subnet_ids : list of subnet ids for which to gather information
+ filters : additional filters to apply to request
+ """
+ return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
+
+
+def describe_subnets(connection, module):
+ """
+ Describe Subnets.
+
+ module : AnsibleAWSModule object
+ connection : boto3 client connection object
+ """
+ # collect parameters
+ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ subnet_ids = module.params.get('subnet_ids')
+
+ if subnet_ids is None:
+ # Set subnet_ids to empty list if it is None
+ subnet_ids = []
+
+ # init empty list for return vars
+ subnet_info = list()
+
+ # Get the basic VPC info
+ try:
+ response = describe_subnets_with_backoff(connection, subnet_ids, filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Failed to describe subnets')
+
+ for subnet in response['Subnets']:
+ # for backwards compatibility
+ subnet['id'] = subnet['SubnetId']
+ subnet_info.append(camel_dict_to_snake_dict(subnet))
+ # convert tag list to ansible dict
+ subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
+
+ module.exit_json(subnets=subnet_info)
+
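+# For reference, ansible_dict_to_boto3_filter_list converts the module's
+# filters dict into the boto3 Filters form, e.g. (illustrative values):
+#
+#   {'vpc-id': 'vpc-abcdef00'}  ->  [{'Name': 'vpc-id', 'Values': ['vpc-abcdef00']}]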
+
+def main():
+ argument_spec = dict(
+ subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']),
+ filters=dict(type='dict', default={})
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_vpc_subnet_facts':
+ module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", date='2021-12-01', collection_name='amazon.aws')
+
+ connection = module.client('ec2')
+
+ describe_subnets(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
new file mode 100644
index 00000000..3c4f6422
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
@@ -0,0 +1,876 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+version_added: 1.0.0
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+ - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys (including versions and delete markers)
+ in the bucket first (an S3 bucket must be empty for a successful deletion).
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - Name of the S3 bucket.
+ required: true
+ type: str
+ policy:
+ description:
+ - The JSON policy as a string.
+ type: json
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and FakeS3 etc.
+ - Assumes AWS if not specified.
+      - For Walrus, use the FQDN of the endpoint without scheme or path.
+ aliases: [ S3_URL ]
+ type: str
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+ with Ceph in order to provide the same module behaviour where possible.
+ type: bool
+ default: false
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+ of the request and the data download from the bucket.
+ type: bool
+ state:
+ description:
+ - Create or remove the S3 bucket.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - Tags dict to apply to bucket.
+ type: dict
+ purge_tags:
+ description:
+ - Whether to remove tags that aren't present in the I(tags) parameter.
+ type: bool
+ default: True
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended).
+ type: bool
+ encryption:
+ description:
+ - Describes the default server-side encryption to apply to new objects in the bucket.
+ In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+ choices: [ 'none', 'AES256', 'aws:kms' ]
+ type: str
+ encryption_key_id:
+ description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If
+ not specified then it will default to the AWS provided KMS key.
+ type: str
+ public_access:
+ description:
+ - Configure public access block for S3 bucket.
+ - This option cannot be used together with I(delete_public_access).
+ suboptions:
+ block_public_acls:
+ description: Sets BlockPublicAcls value.
+ type: bool
+ default: False
+ block_public_policy:
+ description: Sets BlockPublicPolicy value.
+ type: bool
+ default: False
+ ignore_public_acls:
+ description: Sets IgnorePublicAcls value.
+ type: bool
+ default: False
+ restrict_public_buckets:
+        description: Sets RestrictPublicBuckets value.
+ type: bool
+ default: False
+ type: dict
+ version_added: 1.3.0
+ delete_public_access:
+ description:
+ - Delete public access block configuration from bucket.
+ - This option cannot be used together with a I(public_access) definition.
+ default: false
+ type: bool
+ version_added: 1.3.0
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+
+notes:
+  - If the C(requestPayment), C(policy), C(tagging) or C(versioning)
+    operations/APIs aren't implemented by the endpoint, the module doesn't fail
+    as long as the related parameters keep their defaults, that is
+    I(requester_pays) is C(False), and I(policy), I(tags) and I(versioning) are C(None).
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple S3 bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+
+# Create a simple S3 bucket on Ceph Rados Gateway
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an S3 bucket and any keys it contains
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- amazon.aws.s3_bucket:
+ name: mydobucket
+ s3_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+ encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, default key
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+
+# Create a bucket with public policy block configuration
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ public_access:
+        block_public_acls: true
+        ignore_public_acls: true
+        ## keys set to 'false' can be omitted; undefined keys default to 'false'
+        # block_public_policy: false
+        # restrict_public_buckets: false
+
+# Delete public policy block from bucket
+- amazon.aws.s3_bucket:
+ name: mys3bucket
+ state: present
+ delete_public_access: true
+'''
+
+import json
+import os
+import time
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_conn
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import compare_policies
+from ..module_utils.ec2 import get_aws_connection_info
+from ..module_utils.ec2 import snake_dict_to_camel_dict
+
+
+def create_or_update_bucket(s3_client, module, location):
+
+ policy = module.params.get("policy")
+ name = module.params.get("name")
+ requester_pays = module.params.get("requester_pays")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ versioning = module.params.get("versioning")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ public_access = module.params.get("public_access")
+ delete_public_access = module.params.get("delete_public_access")
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location)
+ s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+ changed = changed or bucket_changed
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
+ # Versioning
+ try:
+ versioning_status = get_bucket_versioning(s3_client, name)
+ except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp:
+ if versioning is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ except (BotoCoreError, ClientError) as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ else:
+ if versioning is not None:
+ required_versioning = None
+ if versioning and versioning_status.get('Status') != "Enabled":
+ required_versioning = 'Enabled'
+ elif not versioning and versioning_status.get('Status') == "Enabled":
+ required_versioning = 'Suspended'
+
+ if required_versioning:
+ try:
+ put_bucket_versioning(s3_client, name, required_versioning)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+ versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+ # This output format is there to ensure compatibility with previous versions of the module
+ result['versioning'] = {
+ 'Versioning': versioning_status.get('Status', 'Disabled'),
+ 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+ }
+
+ # Requester pays
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp:
+ if requester_pays is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ except (BotoCoreError, ClientError) as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ else:
+ if requester_pays is not None:
+ payer = 'Requester' if requester_pays else 'BucketOwner'
+ if requester_pays_status != payer:
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+ if requester_pays_status is None:
+                    # We have seen quite often that the put request is not taken into
+                    # account, so we retry one more time
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+ changed = True
+
+ result['requester_pays'] = requester_pays
+
+ # Policy
+ try:
+ current_policy = get_bucket_policy(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp:
+ if policy is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ except (BotoCoreError, ClientError) as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ else:
+ if policy is not None:
+ if isinstance(policy, string_types):
+ policy = json.loads(policy)
+
+ if not policy and current_policy:
+ try:
+ delete_bucket_policy(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+ changed = True
+ elif compare_policies(current_policy, policy):
+ try:
+ put_bucket_policy(s3_client, name, policy)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+ if current_policy is None:
+                    # As with request payment, the put request is sometimes not taken
+                    # into account, so we retry one more time
+ put_bucket_policy(s3_client, name, policy)
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+ changed = True
+
+ result['policy'] = current_policy
+
+ # Tags
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as exp:
+ if tags is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ except (ClientError, BotoCoreError) as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ else:
+ if tags is not None:
+ # Tags are always returned as text
+ tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+ if not purge_tags:
+ # Ensure existing tags that aren't updated by desired tags remain
+ current_copy = current_tags_dict.copy()
+ current_copy.update(tags)
+ tags = current_copy
+ if current_tags_dict != tags:
+ if tags:
+ try:
+ put_bucket_tagging(s3_client, name, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket tags")
+ else:
+ if purge_tags:
+ try:
+ delete_bucket_tagging(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket tags")
+ current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+ changed = True
+
+ result['tags'] = current_tags_dict
+
+ # Encryption
+ try:
+ current_encryption = get_bucket_encryption(s3_client, name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket encryption")
+
+ if encryption is not None:
+ current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+ current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+ if encryption == 'none' and current_encryption_algorithm is not None:
+ try:
+ delete_bucket_encryption(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+ changed = True
+        elif encryption != 'none' and ((encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
+ expected_encryption = {'SSEAlgorithm': encryption}
+ if encryption == 'aws:kms' and encryption_key_id is not None:
+ expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+ current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
+ changed = True
+
+ result['encryption'] = current_encryption
+
+    # Public access block configuration
+ current_public_access = {}
+
+ # -- Create / Update public access block
+ if public_access is not None:
+ try:
+ current_public_access = get_bucket_public_access(s3_client, name)
+ except (ClientError, BotoCoreError) as err_public_access:
+ module.fail_json_aws(err_public_access, msg="Failed to get bucket public access configuration")
+ camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
+
+ if current_public_access == camel_public_block:
+ result['public_access_block'] = current_public_access
+ else:
+ put_bucket_public_access(s3_client, name, camel_public_block)
+ changed = True
+ result['public_access_block'] = camel_public_block
+
+ # -- Delete public access block
+ if delete_public_access:
+ try:
+ current_public_access = get_bucket_public_access(s3_client, name)
+ except (ClientError, BotoCoreError) as err_public_access:
+ module.fail_json_aws(err_public_access, msg="Failed to get bucket public access configuration")
+
+ if current_public_access == {}:
+ result['public_access_block'] = current_public_access
+ else:
+ delete_bucket_public_access(s3_client, name)
+ changed = True
+ result['public_access_block'] = {}
+
+ # Module exit
+ module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+ # head_bucket appeared to be really inconsistent, so we use list_buckets instead,
+ # and loop over all the buckets, even if we know it's less performant :(
+    all_buckets = s3_client.list_buckets()['Buckets']
+ return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
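+# For contrast, the head_bucket-based check alluded to above would look like
+# the sketch below. It is illustrative only and not used here because of the
+# inconsistencies noted in the comment (the '404' error code is an assumption
+# about how a missing bucket surfaces through botocore):
+#
+#   def _example_bucket_exists_head(s3_client, bucket_name):
+#       try:
+#           s3_client.head_bucket(Bucket=bucket_name)
+#           return True
+#       except is_boto3_error_code('404'):
+#           return False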
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+ try:
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ if len(configuration) > 0:
+ s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+ else:
+ s3_client.create_bucket(Bucket=bucket_name)
+ return True
+ except is_boto3_error_code('BucketAlreadyOwnedByYou'):
+ # We should never get here since we check the bucket presence before calling the create_or_update_bucket
+ # method. However, the AWS Api sometimes fails to report bucket presence, so we catch this exception
+ return False
+
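+# The helpers below pass catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']
+# to AWSRetry so that calls made immediately after bucket creation are retried
+# while the bucket settles, instead of failing on transient NoSuchBucket errors.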
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+ s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+ s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_policy(s3_client, bucket_name):
+ s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_policy(s3_client, bucket_name):
+ try:
+ current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+ except is_boto3_error_code('NoSuchBucketPolicy'):
+ return None
+
+ return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+ s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_request_payment(s3_client, bucket_name):
+ return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_versioning(s3_client, bucket_name):
+ return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+ s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_encryption(s3_client, bucket_name):
+ if not hasattr(s3_client, "get_bucket_encryption"):
+ return None
+
+ try:
+ result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+ except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+ return None
+ except (IndexError, KeyError):
+ return None
+
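+# For reference, GetBucketEncryption returns a structure like the following
+# (values illustrative); the helper above extracts
+# ApplyServerSideEncryptionByDefault from the first rule:
+#
+#   {'ServerSideEncryptionConfiguration':
+#       {'Rules': [{'ApplyServerSideEncryptionByDefault':
+#           {'SSEAlgorithm': 'aws:kms', 'KMSMasterKeyID': 'arn:aws:kms:...'}}]}}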
+
+def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+ max_retries = 3
+ for retries in range(1, max_retries + 1):
+ try:
+ put_bucket_encryption(s3_client, name, expected_encryption)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to set bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
+ should_fail=(retries == max_retries), retries=5)
+ if current_encryption == expected_encryption:
+ return current_encryption
+
+ # We shouldn't get here, the only time this should happen is if
+ # current_encryption != expected_encryption and retries == max_retries
+ # Which should use module.fail_json and fail out first.
+ module.fail_json(msg='Failed to apply bucket encryption',
+ current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+ server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+ s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_tagging(s3_client, bucket_name):
+ s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_encryption(s3_client, bucket_name):
+ s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
+def delete_bucket(s3_client, bucket_name):
+ try:
+ s3_client.delete_bucket(Bucket=bucket_name)
+ except is_boto3_error_code('NoSuchBucket'):
+        # This means the bucket should have been in a deleting state when we checked its existence
+ # We just ignore the error
+ pass
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_public_access(s3_client, bucket_name, public_access):
+ '''
+ Put new public access block to S3 bucket
+ '''
+    s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_access)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_public_access(s3_client, bucket_name):
+ '''
+ Delete public access block from S3 bucket
+ '''
+ s3_client.delete_public_access_block(Bucket=bucket_name)
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ current_policy = get_bucket_policy(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+ if compare_policies(current_policy, expected_policy):
+ time.sleep(5)
+ else:
+ return current_policy
+ if should_fail:
+ module.fail_json(msg="Bucket policy failed to apply in the expected time",
+ requested_policy=expected_policy, live_policy=current_policy)
+ else:
+ return None
+
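+# The wait_*_is_applied helpers (above and below) share one pattern: poll the
+# live value roughly every 5 seconds (8 for versioning) for a bounded number
+# of attempts, returning it once it matches, otherwise failing with both the
+# requested and the live state, or returning None when should_fail is False.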
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ if requester_pays_status != expected_payer:
+ time.sleep(5)
+ else:
+ return requester_pays_status
+ if should_fail:
+ module.fail_json(msg="Bucket request payment failed to apply in the expected time",
+ requested_status=expected_payer, live_status=requester_pays_status)
+ else:
+ return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+ for dummy in range(0, retries):
+ try:
+ encryption = get_bucket_encryption(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+ if encryption != expected_encryption:
+ time.sleep(5)
+ else:
+ return encryption
+
+ if should_fail:
+ module.fail_json(msg="Bucket encryption failed to apply in the expected time",
+ requested_encryption=expected_encryption, live_encryption=encryption)
+
+ return encryption
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+ for dummy in range(0, 24):
+ try:
+ versioning_status = get_bucket_versioning(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+ if versioning_status.get('Status') != required_versioning:
+ time.sleep(8)
+ else:
+ return versioning_status
+ module.fail_json(msg="Bucket versioning failed to apply in the expected time",
+ requested_versioning=required_versioning, live_versioning=versioning_status)
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+ for dummy in range(0, 12):
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ if current_tags_dict != expected_tags_dict:
+ time.sleep(5)
+ else:
+ return current_tags_dict
+ module.fail_json(msg="Bucket tags failed to apply in the expected time",
+ requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+ try:
+ current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+ except is_boto3_error_code('NoSuchTagSet'):
+ return {}
+ # The Ceph S3 API returns a different error code to AWS
+ except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except
+ return {}
+
+ return boto3_tag_list_to_ansible_dict(current_tags)
+
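+# boto3_tag_list_to_ansible_dict converts the TagSet list form into a plain
+# dict, e.g. (illustrative values):
+#
+#   [{'Key': 'env', 'Value': 'prod'}]  ->  {'env': 'prod'}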
+
+def get_bucket_public_access(s3_client, bucket_name):
+ '''
+ Get current bucket public access block
+ '''
+ try:
+ bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
+ return bucket_public_access_block['PublicAccessBlockConfiguration']
+ except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'):
+ return {}
+
+
+def paginated_list(s3_client, **pagination_params):
+ pg = s3_client.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+ try:
+ pg = s3_client.get_paginator('list_object_versions')
+ for page in pg.paginate(**pagination_params):
+ # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
+ yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+ except is_boto3_error_code('NoSuchBucket'):
+ yield []
+
+
+def destroy_bucket(s3_client, module):
+
+ force = module.params.get("force")
+ name = module.params.get("name")
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ module.exit_json(changed=False)
+
+ if force:
+ # if there are contents then we need to delete them (including versions) before we can delete the bucket
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get('VersionId'):
+ fk.pop('VersionId')
+
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+ if resp.get('Errors'):
+ module.fail_json(
+ msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
+ ', '.join([k['Key'] for k in resp['Errors']])
+ ),
+ errors=resp['Errors'], response=resp
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+ try:
+ delete_bucket(s3_client, name)
+ s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket")
+
+ module.exit_json(changed=True)
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
+ if s3_url and ceph: # TODO - test this
+ ceph = urlparse(s3_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ return boto3_conn(**params)
+
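+# Example (hypothetical endpoint): for a Ceph RGW at https://rgw.example.com,
+# get_s3_client(module, aws_connect_kwargs, location, True, 'https://rgw.example.com')
+# builds the client with that endpoint and use_ssl=True.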
+
+def main():
+
+ argument_spec = dict(
+ force=dict(default=False, type='bool'),
+ policy=dict(type='json'),
+ name=dict(required=True),
+ requester_pays=dict(type='bool'),
+ s3_url=dict(aliases=['S3_URL']),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ versioning=dict(type='bool'),
+ ceph=dict(default=False, type='bool'),
+ encryption=dict(choices=['none', 'AES256', 'aws:kms']),
+ encryption_key_id=dict(),
+ public_access=dict(type='dict', options=dict(
+ block_public_acls=dict(type='bool', default=False),
+ ignore_public_acls=dict(type='bool', default=False),
+ block_public_policy=dict(type='bool', default=False),
+ restrict_public_buckets=dict(type='bool', default=False))),
+ delete_public_access=dict(type='bool', default=False)
+ )
+
+ required_by = dict(
+ encryption_key_id=('encryption',),
+ )
+
+ mutually_exclusive = [
+ ['public_access', 'delete_public_access']
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec, required_by=required_by, mutually_exclusive=mutually_exclusive
+ )
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ s3_url = module.params.get('s3_url')
+ ceph = module.params.get('ceph')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if ceph and not s3_url:
+ module.fail_json(msg='ceph flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to Ceph RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+
+ if s3_client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
+
+ state = module.params.get("state")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+
+ if not hasattr(s3_client, "get_bucket_encryption"):
+ if encryption is not None:
+ module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
+
+ # Parameter validation
+ if encryption_key_id is not None and encryption != 'aws:kms':
+ module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+ if state == 'present':
+ create_or_update_bucket(s3_client, module, location)
+ elif state == 'absent':
+ destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+ main()