summaryrefslogtreecommitdiffstats
path: root/ansible_collections/amazon/aws/plugins/module_utils
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/amazon/aws/plugins/module_utils')
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/_version.py344
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/acm.py222
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/arn.py69
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/batch.py58
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/botocore.py394
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/cloud.py213
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py229
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/core.py77
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py89
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/ec2.py310
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py109
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/elbv2.py1092
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/iam.py75
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/modules.py447
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/policy.py179
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/rds.py390
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/retries.py78
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/route53.py64
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/s3.py102
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/tagging.py181
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/tower.py83
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/transformation.py140
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/urls.py238
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/version.py18
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/waf.py224
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/waiters.py1265
26 files changed, 6690 insertions, 0 deletions
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py
new file mode 100644
index 000000000..d91cf3ab4
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/_version.py
@@ -0,0 +1,344 @@
+# Vendored copy of distutils/version.py from CPython 3.9.5
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+
+try:
+ RE_FLAGS = re.VERBOSE | re.ASCII
+except AttributeError:
+ RE_FLAGS = re.VERBOSE
+
+
+class Version:
+    """Abstract base class for version numbering classes. Just provides
+    constructor (__init__) and reproducer (__repr__), because those
+    seem to be the same for all version numbering classes; and route
+    rich comparisons to _cmp.
+    """
+
+    def __init__(self, vstring=None):
+        # A falsy vstring (None or "") leaves the instance un-parsed;
+        # parse() is supplied by the concrete subclass.
+        if vstring:
+            self.parse(vstring)
+
+    def __repr__(self):
+        return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+    # Each rich comparison delegates to the subclass-provided _cmp() and
+    # passes NotImplemented through unchanged so Python can fall back to
+    # the reflected operation on the other operand.
+    def __eq__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c == 0
+
+    def __lt__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c < 0
+
+    def __le__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c <= 0
+
+    def __gt__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c > 0
+
+    def __ge__(self, other):
+        c = self._cmp(other)
+        if c is NotImplemented:
+            return c
+        return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion(Version):
+    """Version numbering for anal retentives and software idealists.
+    Implements the standard interface for version number classes as
+    described above.  A version number consists of two or three
+    dot-separated numeric components, with an optional "pre-release" tag
+    on the end.  The pre-release tag consists of the letter 'a' or 'b'
+    followed by a number.  If the numeric components of two version
+    numbers are equal, then one with a pre-release tag will always
+    be deemed earlier (lesser) than one without.
+
+    The following are valid version numbers (shown in the order that
+    would be obtained by sorting according to the supplied cmp function):
+
+        0.4       0.4.0  (these two are equivalent)
+        0.4.1
+        0.5a1
+        0.5b3
+        0.5
+        0.9.6
+        1.0
+        1.0.4a3
+        1.0.4b1
+        1.0.4
+
+    The following are examples of invalid version numbers:
+
+        1
+        2.7.2.2
+        1.3.a4
+        1.3pl1
+        1.3c4
+
+    The rationale for this version numbering system will be explained
+    in the distutils documentation.
+    """
+
+    # VERBOSE regex: groups 1/2 are major/minor, 4 is the optional patch,
+    # 5 is the whole prerelease tag (e.g. "a1") and 6 its number.
+    version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$", RE_FLAGS)
+
+    def parse(self, vstring):
+        # Raises ValueError for anything that does not match the strict
+        # two-or-three component form above.
+        match = self.version_re.match(vstring)
+        if not match:
+            raise ValueError("invalid version number '%s'" % vstring)
+
+        (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6)
+
+        if patch:
+            self.version = tuple(map(int, [major, minor, patch]))
+        else:
+            # A missing patch component is normalised to 0 so "0.4" == "0.4.0".
+            self.version = tuple(map(int, [major, minor])) + (0,)
+
+        if prerelease:
+            # Stored as ('a'|'b', number): tuple comparison then orders
+            # alphas before betas, and numerically within each letter.
+            self.prerelease = (prerelease[0], int(prerelease_num))
+        else:
+            self.prerelease = None
+
+    def __str__(self):
+        # Reconstruct a canonical string; a zero patch component is omitted.
+        if self.version[2] == 0:
+            vstring = ".".join(map(str, self.version[0:2]))
+        else:
+            vstring = ".".join(map(str, self.version))
+
+        if self.prerelease:
+            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+        return vstring
+
+    def _cmp(self, other):
+        # Strings are parsed on the fly; anything else that is not a
+        # StrictVersion defers to the other operand via NotImplemented.
+        if isinstance(other, str):
+            other = StrictVersion(other)
+        elif not isinstance(other, StrictVersion):
+            return NotImplemented
+
+        if self.version != other.version:
+            # numeric versions don't match
+            # prerelease stuff doesn't matter
+            if self.version < other.version:
+                return -1
+            else:
+                return 1
+
+        # have to compare prerelease
+        # case 1: neither has prerelease; they're equal
+        # case 2: self has prerelease, other doesn't; other is greater
+        # case 3: self doesn't have prerelease, other does: self is greater
+        # case 4: both have prerelease: must compare them!
+
+        if not self.prerelease and not other.prerelease:
+            return 0
+        elif self.prerelease and not other.prerelease:
+            return -1
+        elif not self.prerelease and other.prerelease:
+            return 1
+        elif self.prerelease and other.prerelease:
+            if self.prerelease == other.prerelease:
+                return 0
+            elif self.prerelease < other.prerelease:
+                return -1
+            else:
+                return 1
+        else:
+            raise AssertionError("never get here")
+
+
+# end class StrictVersion
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+
+class LooseVersion(Version):
+    """Version numbering for anarchists and software realists.
+    Implements the standard interface for version number classes as
+    described above.  A version number consists of a series of numbers,
+    separated by either periods or strings of letters.  When comparing
+    version numbers, the numeric components will be compared
+    numerically, and the alphabetic components lexically.  The following
+    are all valid version numbers, in no particular order:
+
+        1.5.1
+        1.5.2b2
+        161
+        3.10a
+        8.02
+        3.4j
+        1996.07.12
+        3.2.pl0
+        3.1.1.6
+        2g6
+        11g
+        0.960923
+        2.2beta29
+        1.13++
+        5.5.kw
+        2.0b1pl0
+
+    In fact, there is no such thing as an invalid version number under
+    this scheme; the rules for comparison are simple and predictable,
+    but may not always give the results you want (for some definition
+    of "want").
+    """
+
+    component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
+
+    def __init__(self, vstring=None):
+        # Identical to Version.__init__; kept (rather than inherited) for
+        # fidelity with the vendored upstream distutils source.
+        if vstring:
+            self.parse(vstring)
+
+    def parse(self, vstring):
+        # I've given up on thinking I can reconstruct the version string
+        # from the parsed tuple -- so I just store the string here for
+        # use by __str__
+        self.vstring = vstring
+        # Split into digit runs and letter runs; drop the empty strings
+        # produced by re.split and the "." separators themselves.
+        components = [x for x in self.component_re.split(vstring) if x and x != "."]
+        for i, obj in enumerate(components):
+            try:
+                # Numeric components compare numerically, so "10" > "9".
+                components[i] = int(obj)
+            except ValueError:
+                pass
+
+        self.version = components
+
+    def __str__(self):
+        return self.vstring
+
+    def __repr__(self):
+        return "LooseVersion ('%s')" % str(self)
+
+    def _cmp(self, other):
+        if isinstance(other, str):
+            other = LooseVersion(other)
+        elif not isinstance(other, LooseVersion):
+            return NotImplemented
+
+        # NOTE(review): on Python 3, comparing an int component against a
+        # str component at the same position (e.g. "1.0" vs "1.a") raises
+        # TypeError from the list comparison below -- confirm callers only
+        # compare structurally compatible version strings.
+        if self.version == other.version:
+            return 0
+        if self.version < other.version:
+            return -1
+        if self.version > other.version:
+            return 1
+
+
+# end class LooseVersion
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
new file mode 100644
index 000000000..81c65507e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
+# on behalf of Telstra Corporation Limited
+#
+# Common functionality to be used by the modules:
+# - acm_certificate
+# - acm_certificate_info
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Common Amazon Certificate Manager facts shared between modules
+"""
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .core import is_boto3_error_code
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
+class ACMServiceManager(object):
+ """Handles ACM Facts Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('acm')
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def delete_certificate_with_backoff(self, client, arn):
+ client.delete_certificate(CertificateArn=arn)
+
+ def delete_certificate(self, client, module, arn):
+ module.debug("Attempting to delete certificate %s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
+ module.debug("Successfully deleted certificate %s" % arn)
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def list_certificates_with_backoff(self, client, statuses=None):
+ paginator = client.get_paginator('list_certificates')
+ kwargs = dict()
+ if statuses:
+ kwargs['CertificateStatuses'] = statuses
+ return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def get_certificate_with_backoff(self, client, certificate_arn):
+ response = client.get_certificate(CertificateArn=certificate_arn)
+ # strip out response metadata
+ return {'Certificate': response['Certificate'],
+ 'CertificateChain': response['CertificateChain']}
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def describe_certificate_with_backoff(self, client, certificate_arn):
+ return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def list_certificate_tags_with_backoff(self, client, certificate_arn):
+ return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
+
+ # Returns a list of certificates
+ # if domain_name is specified, returns only certificates with that domain
+ # if an ARN is specified, returns only that certificate
+ # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
+ # only certificates which contain all those tags (key exists, value matches).
+ def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
+ try:
+ all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificates")
+ if domain_name:
+ certificates = [cert for cert in all_certificates
+ if cert['DomainName'] == domain_name]
+ else:
+ certificates = all_certificates
+
+ if arn:
+ # still return a list, not just one item
+ certificates = [c for c in certificates if c['CertificateArn'] == arn]
+
+ results = []
+ for certificate in certificates:
+ try:
+ cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
+
+ # in some states, ACM resources do not have a corresponding cert
+ if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
+ try:
+ cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
+ cert_data = camel_dict_to_snake_dict(cert_data)
+ try:
+ tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
+ except is_boto3_error_code('ResourceNotFoundException'):
+ # The certificate was deleted after the call to list_certificates_with_backoff.
+ continue
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
+
+ cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ results.append(cert_data)
+
+ if only_tags:
+ for tag_key in only_tags:
+ try:
+ results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
+ except (TypeError, AttributeError) as e:
+ for c in results:
+ if 'tags' not in c:
+ module.debug("cert is %s" % str(c))
+ module.fail_json(msg="ACM tag filtering err", exception=e)
+
+ return results
+
+ # returns the domain name of a certificate (encoded in the public cert)
+ # for a given ARN
+ # A cert with that ARN must already exist
+ def get_domain_of_cert(self, client, module, arn):
+ if arn is None:
+ module.fail(msg="Internal error with ACM domain fetching, no certificate ARN specified")
+ try:
+ cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
+ return cert_data['DomainName']
+
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
+ def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
+ if certificate_chain:
+ if arn:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateChain=to_bytes(certificate_chain),
+ CertificateArn=arn)
+ else:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateChain=to_bytes(certificate_chain))
+ else:
+ if arn:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key),
+ CertificateArn=arn)
+ else:
+ ret = client.import_certificate(Certificate=to_bytes(certificate),
+ PrivateKey=to_bytes(private_key))
+ return ret['CertificateArn']
+
+ # Tags are a normal Ansible style dict
+ # {'Key':'Value'}
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
+ def tag_certificate_with_backoff(self, client, arn, tags):
+ aws_tags = ansible_dict_to_boto3_tag_list(tags)
+ client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
+
+ def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
+
+ original_arn = arn
+
+ # upload cert
+ try:
+ arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't upload new certificate")
+
+ if original_arn and (arn != original_arn):
+ # I'm not sure whether the API guarentees that the ARN will not change
+ # I'm failing just in case.
+ # If I'm wrong, I'll catch it in the integration tests.
+ module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
+
+ # tag that cert
+ try:
+ self.tag_certificate_with_backoff(client, arn, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
+ try:
+ self.delete_certificate_with_backoff(client, arn)
+ except (BotoCoreError, ClientError):
+ module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.")
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
+ module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
+
+ return arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
new file mode 100644
index 000000000..ac8dfc9e0
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
@@ -0,0 +1,69 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+def parse_aws_arn(arn):
+    """
+    The following are the general formats for ARNs.
+    arn:partition:service:region:account-id:resource-id
+    arn:partition:service:region:account-id:resource-type/resource-id
+    arn:partition:service:region:account-id:resource-type:resource-id
+    The specific formats depend on the resource.
+    The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID.
+
+    Returns a dict with keys partition, service, region, account_id and
+    resource, or None when the string does not match the pattern.
+    """
+    # NOTE(review): re.search is unanchored, so a match anywhere inside the
+    # string is accepted (e.g. text preceding "arn:...") -- confirm whether
+    # callers rely on that before anchoring with '^'.
+    m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn)
+    if m is None:
+        return None
+    result = dict()
+    # group(1) is the whole partition; groups 2-3 are inner sub-groups of the
+    # optional "-suffix" part, hence the jump from group 1 to group 4 below.
+    result.update(dict(partition=m.group(1)))
+    result.update(dict(service=m.group(4)))
+    result.update(dict(region=m.group(5)))
+    result.update(dict(account_id=m.group(6)))
+    result.update(dict(resource=m.group(7)))
+
+    return result
+
+
+# An implementation of this used was originally in ec2.py, however Outposts
+# aren't specific to the EC2 service
+def is_outpost_arn(arn):
+    """
+    Validates that the ARN is for an AWS Outpost
+
+    Returns True only when the ARN parses, its service is 'outposts'
+    (case-insensitively) and its resource matches the documented
+    "outpost/op-<17 hex chars>" form.
+
+    API Specification Document:
+    https://docs.aws.amazon.com/outposts/latest/APIReference/API_Outpost.html
+    """
+    details = parse_aws_arn(arn)
+
+    # Not parseable as an ARN at all.
+    if not details:
+        return False
+
+    # 'or ""' guards against a missing/None field from the parser.
+    service = details.get('service') or ""
+    if service.lower() != 'outposts':
+        return False
+    resource = details.get('resource') or ""
+    if not re.match('^outpost/op-[a-f0-9]{17}$', resource):
+        return False
+
+    return True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
new file mode 100644
index 000000000..c27214519
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Batch modules.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+
+def cc(key):
+    """
+    Changes python key into Camel case equivalent. For example, 'compute_environment_name' becomes
+    'computeEnvironmentName'.
+
+    :param key: snake_case string to convert
+    :return: lowerCamelCase equivalent of key
+    """
+    # First component keeps its case; each later component is capitalized.
+    components = key.split('_')
+    return components[0] + "".join([token.capitalize() for token in components[1:]])
+
+
+def set_api_params(module, module_params):
+    """
+    Sets module parameters to those expected by the boto3 API.
+
+    Keeps only the module.params entries named in module_params whose value
+    is not None, then converts the keys to camelCase for boto3.
+
+    :param module: AnsibleModule-like object exposing .params
+    :param module_params: iterable of parameter names to forward
+    :return: dict of camelCased parameters
+    """
+    api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
+    return snake_dict_to_camel_dict(api_params)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
new file mode 100644
index 000000000..a8a014c20
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
@@ -0,0 +1,394 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+A set of helper functions designed to help with initializing boto3/botocore
+connections.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import traceback
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six import binary_type
+from ansible.module_utils.six import text_type
+
+from .retries import AWSRetry
+
+
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    """
    Builds a boto3 resource/client connection cleanly wrapping the most common failures.
    Handles:
        ValueError,
        botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
        botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError,
        botocore.exceptions.NoRegionError

    :param module: AnsibleModule-like object; failures are reported via module.fail_json
    :param conn_type: 'client', 'resource' or 'both' (validated by _boto3_conn)
    :param resource: AWS service name passed to boto3 (e.g. 'ec2')
    :param region: AWS region name
    :param endpoint: explicit endpoint URL, if any
    :param params: extra keyword arguments forwarded to _boto3_conn
    """
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
    except ValueError as e:
        # Raised by _boto3_conn when conn_type is not one of the accepted values
        module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
            botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
        # Credential/config problems: botocore's own message is the clearest report
        module.fail_json(msg=to_native(e))
    except botocore.exceptions.NoRegionError:
        module.fail_json(msg="The %s module requires a region and none was found in configuration, "
                         "environment variables or module parameters" % module._name)
+
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ """
+ Builds a boto3 resource/client connection cleanly wrapping the most common failures.
+ No exceptions are caught/handled.
+ """
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+ raise ValueError('There is an issue in the calling code. You '
+ 'must specify either both, resource, or client to '
+ 'the conn_type parameter in the boto3_conn function '
+ 'call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ enable_placebo(session)
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
# Inventory plugins don't have access to the same 'module', they need to throw
# an exception rather than calling module.fail_json
# Public alias: identical behavior to _boto3_conn (exceptions propagate to the caller).
boto3_inventory_conn = _boto3_conn
+
+
def boto_exception(err):
    """
    Extracts the error message from a boto exception.

    :param err: Exception from boto
    :return: Error message
    """
    # Newer exceptions carry a ready-made message.
    if hasattr(err, 'error_message'):
        return err.error_message
    # Legacy boto exceptions expose 'message'; include type info for context.
    if hasattr(err, 'message'):
        return '{0} {1} - {2}'.format(err.message, err, type(err))
    # Last resort: generic formatting of the exception.
    return '{0}: {1}'.format(Exception, err)
+
+
def get_aws_region(module, boto3=None):
    """
    Return the AWS region to use, preferring the module's 'region' parameter
    and falling back to the region configured for the selected botocore profile.

    :param module: AnsibleModule-like object providing params and fail_json
    :param boto3: ignored; retained for backwards compatibility of the signature
    :return: region name string, or None when no region could be determined
    """
    region = module.params.get('region')

    if region:
        return region

    if not HAS_BOTO3:
        module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)

    # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
    try:
        profile_name = module.params.get('profile')
        return botocore.session.Session(profile=profile_name).get_config_variable('region')
    except botocore.exceptions.ProfileNotFound:
        # Unknown profile: leave region resolution to the caller / boto3 defaults.
        return None
+
+
def get_aws_connection_info(module, boto3=None):
    """
    Assemble connection information from module parameters and environment
    variables.

    Module parameters always win; environment variables are only consulted
    when the corresponding parameter is unset. Passing a profile together
    with explicit credentials is rejected.

    :param module: AnsibleModule-like object providing params and fail_json
    :param boto3: ignored; retained for backwards compatibility of the signature
    :return: tuple of (region, endpoint_url, boto_params) where boto_params is
        suitable for passing to boto3_conn / boto3.session.Session
    """

    # Check module args for credentials, then check environment vars
    # access_key

    endpoint_url = module.params.get('endpoint_url')
    access_key = module.params.get('access_key')
    secret_key = module.params.get('secret_key')
    session_token = module.params.get('session_token')
    region = get_aws_region(module)
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')
    ca_bundle = module.params.get('aws_ca_bundle')
    config = module.params.get('aws_config')

    # Only read the profile environment variables if we've *not* been passed
    # any credentials as parameters.
    # Note: AWS_DEFAULT_PROFILE deliberately overrides AWS_PROFILE here.
    if not profile_name and not access_key and not secret_key:
        if os.environ.get('AWS_PROFILE'):
            profile_name = os.environ.get('AWS_PROFILE')
        if os.environ.get('AWS_DEFAULT_PROFILE'):
            profile_name = os.environ.get('AWS_DEFAULT_PROFILE')

    if profile_name and (access_key or secret_key or session_token):
        module.fail_json(msg="Passing both a profile and access tokens is not supported.")

    # Botocore doesn't like empty strings, make sure we default to None in the case of an empty
    # string.
    if not access_key:
        # AWS_ACCESS_KEY_ID is the one supported by the AWS CLI
        # AWS_ACCESS_KEY is to match up with our parameter name
        if os.environ.get('AWS_ACCESS_KEY_ID'):
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif os.environ.get('AWS_ACCESS_KEY'):
            access_key = os.environ['AWS_ACCESS_KEY']
        # Deprecated - 'EC2' implies just EC2, but is global
        elif os.environ.get('EC2_ACCESS_KEY'):
            access_key = os.environ['EC2_ACCESS_KEY']
        else:
            # in case access_key came in as empty string
            access_key = None

    if not secret_key:
        # AWS_SECRET_ACCESS_KEY is the one supported by the AWS CLI
        # AWS_SECRET_KEY is to match up with our parameter name
        if os.environ.get('AWS_SECRET_ACCESS_KEY'):
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif os.environ.get('AWS_SECRET_KEY'):
            secret_key = os.environ['AWS_SECRET_KEY']
        # Deprecated - 'EC2' implies just EC2, but is global
        elif os.environ.get('EC2_SECRET_KEY'):
            secret_key = os.environ['EC2_SECRET_KEY']
        else:
            # in case secret_key came in as empty string
            secret_key = None

    if not session_token:
        # AWS_SESSION_TOKEN is supported by the AWS CLI
        if os.environ.get('AWS_SESSION_TOKEN'):
            session_token = os.environ['AWS_SESSION_TOKEN']
        # Deprecated - boto
        elif os.environ.get('AWS_SECURITY_TOKEN'):
            session_token = os.environ['AWS_SECURITY_TOKEN']
        # Deprecated - 'EC2' implies just EC2, but is global
        elif os.environ.get('EC2_SECURITY_TOKEN'):
            session_token = os.environ['EC2_SECURITY_TOKEN']
        else:
            # in case secret_token came in as empty string
            session_token = None

    if profile_name:
        # With a profile, credentials come from the profile; explicitly null
        # out the key parameters so boto3 does not mix sources.
        boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
        boto_params['profile_name'] = profile_name
    else:
        boto_params = dict(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            aws_session_token=session_token,
        )

    # 'verify' is either a bool (validate or not) or a path to a CA bundle.
    if validate_certs and ca_bundle:
        boto_params['verify'] = ca_bundle
    else:
        boto_params['verify'] = validate_certs

    if config is not None:
        boto_params['aws_config'] = botocore.config.Config(**config)

    # botocore expects text, not bytes, for all of these values.
    for param, value in boto_params.items():
        if isinstance(value, binary_type):
            boto_params[param] = text_type(value, 'utf-8', 'strict')

    return region, endpoint_url, boto_params
+
+
+def _paginated_query(client, paginator_name, **params):
+ paginator = client.get_paginator(paginator_name)
+ result = paginator.paginate(**params).build_full_result()
+ return result
+
+
def paginated_query_with_retries(client, paginator_name, retry_decorator=None, **params):
    """
    Performs a boto3 paginated query.

    By default uses AWSRetry.jittered_backoff(retries=10) to retry queries
    with temporary failures.

    Examples:
        tags = paginated_query_with_retries(client, "describe_tags", Filters=[])

        decorator = AWSRetry.backoff(tries=5, delay=5, backoff=2.0,
                                     catch_extra_error_codes=['RequestInProgressException'])
        certificates = paginated_query_with_retries(client, "list_certificates", decorator)
    """
    decorator = retry_decorator if retry_decorator is not None else AWSRetry.jittered_backoff(retries=10)
    return decorator(_paginated_query)(client, paginator_name, **params)
+
+
def gather_sdk_versions():
    """Gather AWS SDK (boto3 and botocore) dependency versions.

    Returns {'boto3_version': str, 'botocore_version': str}
    Returns {} if either module is not installed
    """
    if not HAS_BOTO3:
        return {}
    # Local imports keep this safe even if the module-level import partially failed.
    import boto3
    import botocore
    return {
        'boto3_version': boto3.__version__,
        'botocore_version': botocore.__version__,
    }
+
+
def is_boto3_error_code(code, e=None):
    """Check if the botocore exception is raised by a specific error code.

    Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match

    Example:
        try:
            ec2.describe_instances(InstanceIds=['potato'])
        except is_boto3_error_code('InvalidInstanceID.Malformed'):
            # handle the error for that code case
        except botocore.exceptions.ClientError as e:
            # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
    if e is None:
        # Default to the exception currently being handled.
        import sys
        dummy, e, dummy = sys.exc_info()
    codes = code if isinstance(code, list) else [code]
    if isinstance(e, ClientError) and e.response['Error']['Code'] in codes:
        return ClientError
    # Returning a never-raised type makes the 'except' clause a no-op.
    return type('NeverEverRaisedException', (Exception,), {})
+
+
def is_boto3_error_message(msg, e=None):
    """Check if the botocore exception contains a specific error message.

    Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match

    Example:
        try:
            ec2.describe_vpc_classic_link(VpcIds=[vpc_id])
        except is_boto3_error_message('The functionality you requested is not available in this region.'):
            # handle the error for that error message
        except botocore.exceptions.ClientError as e:
            # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
    if e is None:
        # Default to the exception currently being handled.
        import sys
        dummy, e, dummy = sys.exc_info()
    if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
        return ClientError
    # Returning a never-raised type makes the 'except' clause a no-op.
    return type('NeverEverRaisedException', (Exception,), {})
+
+
def get_boto3_client_method_parameters(client, method_name, required=False):
    """Return the parameter names accepted by a boto3 client method.

    :param client: boto3 client to introspect
    :param method_name: name of the client method to inspect
    :param required: when True, only return the mandatory parameters
    :return: list of parameter names (empty when the operation takes no input)
    """
    operation = client.meta.method_to_api_mapping.get(method_name)
    input_shape = client._service_model.operation_model(operation).input_shape
    if not input_shape:
        return []
    if required:
        return list(input_shape.required_members)
    return list(input_shape.members.keys())
+
+
+# Used by normalize_boto3_result
+def _boto3_handler(obj):
+ if hasattr(obj, 'isoformat'):
+ return obj.isoformat()
+ else:
+ return obj
+
+
def normalize_boto3_result(result):
    """
    Because Boto3 returns datetime objects where it knows things are supposed to
    be dates we need to mass-convert them over to strings which Ansible/Jinja
    handle better. This also makes it easier to compare complex objects which
    include a mix of dates in string format (from parameters) and dates as
    datetime objects. Boto3 is happy to be passed ISO8601 format strings.
    """
    def _default(obj):
        # Same behavior as _boto3_handler: ISO-format datetime-likes, pass through the rest.
        return obj.isoformat() if hasattr(obj, 'isoformat') else obj

    return json.loads(json.dumps(result, default=_default))
+
+
def enable_placebo(session):
    """
    Helper to record or replay offline modules for testing purpose.

    Driven by the _ANSIBLE_PLACEBO_RECORD / _ANSIBLE_PLACEBO_REPLAY
    environment variables; does nothing when neither is set.
    """
    record_dir = os.environ.get("_ANSIBLE_PLACEBO_RECORD")
    if record_dir is not None:
        import placebo
        # Each invocation records into the next numbered sub-directory.
        next_index = len(os.listdir(record_dir))
        data_path = f"{record_dir}/{next_index}"
        os.mkdir(data_path)
        pill = placebo.attach(session, data_path=data_path)
        pill.record()
    replay_dir = os.environ.get("_ANSIBLE_PLACEBO_REPLAY")
    if replay_dir is not None:
        import shutil
        import placebo
        # Consume the lowest-numbered recording, staging it in ./_tmp.
        entries = sorted(int(entry) for entry in os.listdir(replay_dir))
        data_path = replay_dir + "/" + str(entries[0])
        try:
            shutil.rmtree("_tmp")
        except FileNotFoundError:
            pass
        shutil.move(data_path, "_tmp")
        if len(entries) == 1:
            # Last recording consumed: remove the now-empty replay directory.
            os.rmdir(replay_dir)
        pill = placebo.attach(session, data_path="_tmp")
        pill.playback()
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
new file mode 100644
index 000000000..e690c0a86
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2021 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+import functools
+import random
+import ansible.module_utils.common.warnings as ansible_warnings
+
+
class BackoffIterator:
    """Generate successive sleep durations for exponential or jittered back-off.

    Args:
        delay (int or float): initial delay.
        backoff (int or float): multiplier applied after each step; 2 doubles the delay each retry.
        max_delay (int or None): upper bound on the returned delay, None for no bound.
        jitter (bool): when true, return a random value between 0 and the computed delay.
    """

    def __init__(self, delay, backoff, max_delay=None, jitter=False):
        self.delay = delay
        self.backoff = backoff
        self.max_delay = max_delay
        self.jitter = jitter

    def __iter__(self):
        # (Re)start the sequence at the initial delay.
        self.current_delay = self.delay
        return self

    def __next__(self):
        value = self.current_delay
        if self.max_delay is not None:
            value = min(value, self.max_delay)
        if self.jitter:
            value = random.uniform(0.0, value)
        # Grow the *uncapped* delay for the next iteration.
        self.current_delay *= self.backoff
        return value
+
+
+def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class):
+ counter = 0
+ for sleep_time in sleep_time_generator:
+ try:
+ return func()
+ except Exception as exc: # pylint: disable=broad-except
+ counter += 1
+ if counter == retries:
+ raise
+ if base_class and not isinstance(exc, base_class):
+ raise
+ status_code = status_code_from_except_f(exc)
+ if found_f(status_code, catch_extra_error_codes):
+ time.sleep(sleep_time)
+ else:
+ raise
+
+
class CloudRetry:
    """
    The base class to be used by other cloud providers to provide a backoff/retry decorator based on status codes.

    Subclasses set ``base_class`` to the provider's base exception type and
    implement ``status_code_from_exception`` (and optionally ``found``) so the
    generic retry machinery can decide whether a given failure is retryable.
    """

    # Exception type candidate errors must be an instance of; type(None)
    # effectively disables retries until a subclass overrides it.
    base_class = type(None)

    @staticmethod
    def status_code_from_exception(error):
        """
        Returns the Error 'code' from an exception.
        Args:
            error: The Exception from which the error code is to be extracted.
            error will be an instance of class.base_class.
        """
        raise NotImplementedError()

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        # True when response_code is a member of catch_extra_error_codes;
        # False when catch_extra_error_codes is None or not iterable.
        def _is_iterable():
            try:
                iter(catch_extra_error_codes)
            except TypeError:
                # not iterable
                return False
            else:
                # iterable
                return True
        return _is_iterable() and response_code in catch_extra_error_codes

    @classmethod
    def base_decorator(cls, retries, found, status_code_from_exception, catch_extra_error_codes, sleep_time_generator):
        # Generic decorator factory: binds the retry policy and delegates the
        # actual retry loop to _retry_func.
        def retry_decorator(func):
            @functools.wraps(func)
            def _retry_wrapper(*args, **kwargs):
                partial_func = functools.partial(func, *args, **kwargs)
                return _retry_func(
                    func=partial_func,
                    sleep_time_generator=sleep_time_generator,
                    retries=retries,
                    catch_extra_error_codes=catch_extra_error_codes,
                    found_f=found,
                    status_code_from_except_f=status_code_from_exception,
                    base_class=cls.base_class,
                )
            return _retry_wrapper
        return retry_decorator

    @classmethod
    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
        """Wrap a callable with retry behavior.
        Args:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
                default=2
            max_delay (int or None): maximum amount of time to wait between retries.
                default=60
            catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry
                default=None
        Returns:
            Callable: A generator that calls the decorated function using an exponential backoff.
        """
        sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay)
        return cls.base_decorator(
            retries=retries,
            found=cls.found,
            status_code_from_exception=cls.status_code_from_exception,
            catch_extra_error_codes=catch_extra_error_codes,
            sleep_time_generator=sleep_time_generator,
        )

    @classmethod
    def jittered_backoff(cls, retries=10, delay=3, backoff=2.0, max_delay=60, catch_extra_error_codes=None):
        """Wrap a callable with retry behavior.
        Args:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
                default=2.0
            max_delay (int or None): maximum amount of time to wait between retries.
                default=60
            catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry
                default=None
        Returns:
            Callable: A generator that calls the decorated function using a jittered backoff strategy.
        """
        sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay, jitter=True)
        return cls.base_decorator(
            retries=retries,
            found=cls.found,
            status_code_from_exception=cls.status_code_from_exception,
            catch_extra_error_codes=catch_extra_error_codes,
            sleep_time_generator=sleep_time_generator,
        )

    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
        """
        Wrap a callable with retry behavior.
        Developers should use CloudRetry.exponential_backoff instead.
        This method has been deprecated and will be removed in release 6.0.0, consider using exponential_backoff method instead.
        Args:
            tries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
                default=1.1
            catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry
                default=None
        Returns:
            Callable: A generator that calls the decorated function using an exponential backoff.
        """
        # This won't emit a warning (we don't have the context available to us), but will trigger
        # sanity failures as we prepare for 6.0.0
        ansible_warnings.deprecate(
            'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead',
            version='6.0.0', collection_name='amazon.aws')

        return cls.exponential_backoff(
            retries=tries,
            delay=delay,
            backoff=backoff,
            max_delay=None,
            catch_extra_error_codes=catch_extra_error_codes,
        )
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
new file mode 100644
index 000000000..c628bff14
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Willem van Ketwich
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# - Willem van Ketwich <willem@vanketwich.com.au>
+#
+# Common functionality to be used by the modules:
+# - cloudfront_distribution
+# - cloudfront_invalidation
+# - cloudfront_origin_access_identity
+"""
+Common cloudfront facts shared between modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import boto3_tag_list_to_ansible_dict
+
+
class CloudFrontFactsServiceManager(object):
    """Handles CloudFront Facts Services.

    Thin wrapper around a retry-enabled CloudFront client that maps botocore
    failures to module.fail_json_aws calls.
    """

    def __init__(self, module):
        self.module = module
        self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())

    def get_distribution(self, distribution_id):
        """Return the full get_distribution response for distribution_id."""
        try:
            return self.client.get_distribution(Id=distribution_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing distribution")

    def get_distribution_config(self, distribution_id):
        """Return the distribution's configuration (with ETag)."""
        try:
            return self.client.get_distribution_config(Id=distribution_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing distribution configuration")

    def get_origin_access_identity(self, origin_access_identity_id):
        """Return the full origin access identity response."""
        try:
            return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing origin access identity")

    def get_origin_access_identity_config(self, origin_access_identity_id):
        """Return the origin access identity's configuration (with ETag)."""
        try:
            return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")

    def get_invalidation(self, distribution_id, invalidation_id):
        """Return details of a single invalidation for a distribution."""
        try:
            return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing invalidation")

    def get_streaming_distribution(self, distribution_id):
        """Return the full get_streaming_distribution response."""
        try:
            return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing streaming distribution")

    def get_streaming_distribution_config(self, distribution_id):
        """Return the streaming distribution's configuration (with ETag)."""
        try:
            return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing streaming distribution")

    def list_origin_access_identities(self):
        """Return all origin access identities (paginated)."""
        try:
            paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
            result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
            return result.get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")

    def list_distributions(self, keyed=True):
        """Return all distributions, optionally keyed by id and aliases."""
        try:
            paginator = self.client.get_paginator('list_distributions')
            result = paginator.paginate().build_full_result().get('DistributionList', {})
            distribution_list = result.get('Items', [])
            if not keyed:
                return distribution_list
            return self.keyed_list_helper(distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing distributions")

    def list_distributions_by_web_acl_id(self, web_acl_id):
        """Return distributions associated with a WAF web ACL, keyed by id and aliases."""
        try:
            # Bug fix: the boto3 parameter name is 'WebACLId' (capital ACL);
            # the previous 'WebAclId' spelling failed parameter validation.
            result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id, aws_retry=True)
            distribution_list = result.get('DistributionList', {}).get('Items', [])
            return self.keyed_list_helper(distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")

    def list_invalidations(self, distribution_id):
        """Return all invalidations for a distribution (paginated)."""
        try:
            paginator = self.client.get_paginator('list_invalidations')
            result = paginator.paginate(DistributionId=distribution_id).build_full_result()
            return result.get('InvalidationList', {}).get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing invalidations")

    def list_streaming_distributions(self, keyed=True):
        """Return all streaming distributions, optionally keyed by id and aliases."""
        try:
            paginator = self.client.get_paginator('list_streaming_distributions')
            result = paginator.paginate().build_full_result()
            streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
            if not keyed:
                return streaming_distribution_list
            return self.keyed_list_helper(streaming_distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing streaming distributions")

    def summary(self):
        """Return a combined summary of distributions, streaming distributions and OAIs."""
        summary_dict = {}
        summary_dict.update(self.summary_get_distribution_list(False))
        summary_dict.update(self.summary_get_distribution_list(True))
        summary_dict.update(self.summary_get_origin_access_identity_list())
        return summary_dict

    def summary_get_origin_access_identity_list(self):
        """Return {'origin_access_identities': [{'Id': ..., 'ETag': ...}, ...]}."""
        try:
            origin_access_identity_list = {'origin_access_identities': []}
            origin_access_identities = self.list_origin_access_identities()
            for origin_access_identity in origin_access_identities:
                oai_id = origin_access_identity['Id']
                # An extra get call per identity is needed to fetch the ETag.
                oai_full_response = self.get_origin_access_identity(oai_id)
                oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
                origin_access_identity_list['origin_access_identities'].append(oai_summary)
            return origin_access_identity_list
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")

    def summary_get_distribution_list(self, streaming=False):
        """Return a trimmed summary list of (streaming) distributions with tags, ETag and invalidations."""
        try:
            list_name = 'streaming_distributions' if streaming else 'distributions'
            key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
            distribution_list = {list_name: []}
            distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
            for dist in distributions:
                temp_distribution = {}
                for key_name in key_list:
                    temp_distribution[key_name] = dist[key_name]
                temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', []))
                temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
                if not streaming:
                    # WebACLId and invalidations only exist for regular distributions.
                    temp_distribution['WebACLId'] = dist['WebACLId']
                    invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
                    if invalidation_ids:
                        temp_distribution['Invalidations'] = invalidation_ids
                resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True)
                temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
                distribution_list[list_name].append(temp_distribution)
            return distribution_list
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error generating summary of distributions")

    def get_etag_from_distribution_id(self, distribution_id, streaming):
        """Return the ETag for a (streaming) distribution id."""
        distribution = {}
        if not streaming:
            distribution = self.get_distribution(distribution_id)
        else:
            distribution = self.get_streaming_distribution(distribution_id)
        return distribution['ETag']

    def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
        """Return the list of invalidation ids for a distribution."""
        try:
            invalidation_ids = []
            invalidations = self.list_invalidations(distribution_id)
            for invalidation in invalidations:
                invalidation_ids.append(invalidation['Id'])
            return invalidation_ids
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")

    def get_distribution_id_from_domain_name(self, domain_name):
        """Return the id of the (streaming) distribution with a matching alias, or "" if none."""
        try:
            distribution_id = ""
            distributions = self.list_distributions(False)
            distributions += self.list_streaming_distributions(False)
            for dist in distributions:
                if 'Items' in dist['Aliases']:
                    for alias in dist['Aliases']['Items']:
                        # Case-insensitive alias comparison.
                        if str(alias).lower() == domain_name.lower():
                            distribution_id = dist['Id']
                            break
            return distribution_id
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")

    def get_aliases_from_distribution_id(self, distribution_id):
        """Return the alias list configured on a distribution."""
        try:
            distribution = self.get_distribution(distribution_id)
            return distribution['DistributionConfig']['Aliases'].get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")

    def keyed_list_helper(self, list_to_key):
        """Index a list of distributions by both their Id and each of their aliases."""
        keyed_list = dict()
        for item in list_to_key:
            distribution_id = item['Id']
            if 'Items' in item['Aliases']:
                aliases = item['Aliases']['Items']
                for alias in aliases:
                    keyed_list.update({alias: item})
            keyed_list.update({distribution_id: item})
        return keyed_list
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py
new file mode 100644
index 000000000..bfd7fe101
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py
@@ -0,0 +1,77 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+ mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+module-created client. To add retries to a client, create a client:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
+from .arn import parse_aws_arn # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
+from .botocore import HAS_BOTO3 # pylint: disable=unused-import
+from .botocore import is_boto3_error_code # pylint: disable=unused-import
+from .botocore import is_boto3_error_message # pylint: disable=unused-import
+from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import
+from .botocore import normalize_boto3_result # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
+from .modules import AnsibleAWSModule # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation
+from .transformation import scrub_none_parameters # pylint: disable=unused-import
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message')
+
+
+class AnsibleAWSError(Exception):
+ pass
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
new file mode 100644
index 000000000..abcbcfd23
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017 Ansible Project
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Direct Connect modules.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+
+
+class DirectConnectError(Exception):
+ def __init__(self, msg, last_traceback=None, exception=None):
+ self.msg = msg
+ self.last_traceback = last_traceback
+ self.exception = exception
+
+
+def delete_connection(client, connection_id):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def associate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
+ " with link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def disassociate_connection_and_lag(client, connection_id, lag_id):
+ try:
+ AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id,
+ lagId=lag_id)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
+ " from link aggregation group {1}.".format(connection_id, lag_id),
+ last_traceback=traceback.format_exc(),
+ exception=e)
+
+
+def delete_virtual_interface(client, virtual_interface):
+ try:
+ AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface)
+ except botocore.exceptions.ClientError as e:
+ raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
+ last_traceback=traceback.format_exc(),
+ exception=e)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
new file mode 100644
index 000000000..817c12298
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -0,0 +1,310 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+This module adds helper functions for various EC2 specific services.
+
+It also includes a large number of imports for functions which historically
+lived here. Most of these functions were not specific to EC2, they ended
+up in this module because "that's where the AWS code was" (originally).
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+# Used to live here, moved into ansible.module_utils.common.dict_transformations
+from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
+from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
+from .botocore import HAS_BOTO3 # pylint: disable=unused-import
+from .botocore import boto3_conn # pylint: disable=unused-import
+from .botocore import boto3_inventory_conn # pylint: disable=unused-import
+from .botocore import boto_exception # pylint: disable=unused-import
+from .botocore import get_aws_region # pylint: disable=unused-import
+from .botocore import get_aws_connection_info # pylint: disable=unused-import
+
+from .botocore import paginated_query_with_retries
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.core
+from .core import AnsibleAWSError # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
+# The names have been changed in .modules to better reflect their applicability.
+from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import
+from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging
+from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import
+from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import
+from .tagging import compare_aws_tags # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation
+from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import
+from .transformation import map_complex_type # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy
+from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
+from .policy import compare_policies # pylint: disable=unused-import
+from .policy import sort_json_policy_dict # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries
+from .retries import AWSRetry # pylint: disable=unused-import
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by HAS_BOTO3
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
+ a try block
+ """
+
+ def get_sg_name(sg, boto3=None):
+ return str(sg['GroupName'])
+
+ def get_sg_id(sg, boto3=None):
+ return str(sg['GroupId'])
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list]
+
+ return sec_group_id_list
+
+
+def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None):
+ """
+ Sets Tags on an EC2 resource.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param tags_to_set: A dictionary of key/value pairs to set
+ :param retry_codes: additional boto3 error codes to trigger retries
+ """
+
+ if not tags_to_set:
+ return False
+ if module.check_mode:
+ return True
+
+ if not retry_codes:
+ retry_codes = []
+
+ try:
+ tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
+ client.create_tags
+ )(
+ Resources=[resource_id], Tags=tags_to_add
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id))
+ return True
+
+
+def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None):
+ """
+ Removes Tags from an EC2 resource.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+    :param tags_to_unset: a list of tag keys to remove
+ :param retry_codes: additional boto3 error codes to trigger retries
+ """
+
+ if not tags_to_unset:
+ return False
+ if module.check_mode:
+ return True
+
+ if not retry_codes:
+ retry_codes = []
+
+ tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
+
+ try:
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
+ client.delete_tags
+ )(
+ Resources=[resource_id], Tags=tags_to_remove
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id))
+ return True
+
+
+def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_codes=None):
+ """
+ Performs a paginated search of EC2 resource tags.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param resource_type: the type of the resource
+ :param retry_codes: additional boto3 error codes to trigger retries
+ """
+ filters = {'resource-id': resource_id}
+ if resource_type:
+ filters['resource-type'] = resource_type
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ if not retry_codes:
+ retry_codes = []
+
+ try:
+ retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)
+ results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator,
+ Filters=filters)
+ return boto3_tag_list_to_ansible_dict(results.get('Tags', None))
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id))
+
+
+def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None):
+ """
+ Updates the tags on an EC2 resource.
+
+ To remove all tags the tags parameter must be explicitly set to an empty dictionary.
+
+ :param client: an EC2 boto3 client
+ :param module: an AnsibleAWSModule object
+ :param resource_id: the identifier for the resource
+ :param resource_type: the type of the resource
+ :param tags: the Tags to apply to the resource
+ :param purge_tags: whether tags missing from the tag list should be removed
+ :param retry_codes: additional boto3 error codes to trigger retries
+ :return: changed: returns True if the tags are changed
+ """
+
+ if tags is None:
+ return False
+
+ if not retry_codes:
+ retry_codes = []
+
+ changed = False
+ current_tags = describe_ec2_tags(client, module, resource_id, resource_type, retry_codes)
+
+ tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if purge_tags and not tags:
+ tags_to_unset = current_tags
+
+ changed |= remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes)
+ changed |= add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes)
+
+ return changed
+
+
+def normalize_ec2_vpc_dhcp_config(option_config):
+ """
+ The boto2 module returned a config dict, but boto3 returns a list of dicts
+ Make the data we return look like the old way, so we don't break users.
+ This is also much more user-friendly.
+ boto3:
+ 'DhcpConfigurations': [
+ {'Key': 'domain-name', 'Values': [{'Value': 'us-west-2.compute.internal'}]},
+ {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]},
+ {'Key': 'netbios-name-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]},
+ {'Key': 'netbios-node-type', 'Values': [1]},
+ {'Key': 'ntp-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]}
+ ],
+ The module historically returned:
+ "new_options": {
+ "domain-name": "ec2.internal",
+ "domain-name-servers": ["AmazonProvidedDNS"],
+ "netbios-name-servers": ["10.0.0.1", "10.0.1.1"],
+ "netbios-node-type": "1",
+ "ntp-servers": ["10.0.0.2", "10.0.1.2"]
+ },
+ """
+ config_data = {}
+
+ if len(option_config) == 0:
+ # If there is no provided config, return the empty dictionary
+ return config_data
+
+ for config_item in option_config:
+ # Handle single value keys
+ if config_item['Key'] == 'netbios-node-type':
+ if isinstance(config_item['Values'], integer_types):
+ config_data['netbios-node-type'] = str((config_item['Values']))
+ elif isinstance(config_item['Values'], list):
+ config_data['netbios-node-type'] = str((config_item['Values'][0]['Value']))
+ # Handle actual lists of values
+ for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']:
+ if config_item['Key'] == option:
+ config_data[option] = [val['Value'] for val in config_item['Values']]
+
+ return config_data
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
new file mode 100644
index 000000000..218052d2f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .core import is_boto3_error_code
+from .ec2 import AWSRetry
+
+
+def get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
+ """
+ try:
+ return _get_elb(connection, module, elb_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+@AWSRetry.jittered_backoff()
+def _get_elb(connection, module, elb_name):
+ """
+ Get an ELB based on name using AWSRetry. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_name: Name of load balancer to get
+ :return: boto3 ELB dict or None if not found
+ """
+
+ try:
+ load_balancer_paginator = connection.get_paginator('describe_load_balancers')
+ return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
+ except is_boto3_error_code('LoadBalancerNotFound'):
+ return None
+
+
+def get_elb_listener(connection, module, elb_arn, listener_port):
+ """
+ Get an ELB listener based on the port provided. If not found, return None.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param elb_arn: ARN of the ELB to look at
+ :param listener_port: Port of the listener to look for
+ :return: boto3 ELB listener dict or None if not found
+ """
+
+ try:
+ listener_paginator = connection.get_paginator('describe_listeners')
+ listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ l = None
+
+ for listener in listeners:
+ if listener['Port'] == listener_port:
+ l = listener
+ break
+
+ return l
+
+
+def get_elb_listener_rules(connection, module, listener_arn):
+ """
+ Get rules for a particular ELB listener using the listener ARN.
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param listener_arn: ARN of the ELB listener
+ :return: boto3 ELB rules list
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+ """
+ Get ARN of a target group using the target group's name
+
+ :param connection: AWS boto3 elbv2 connection
+ :param module: Ansible module
+ :param tg_name: Name of the target group
+ :return: target group ARN string
+ """
+
+ try:
+ response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e)
+
+ tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+ return tg_arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
new file mode 100644
index 000000000..04f6114e1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -0,0 +1,1092 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from copy import deepcopy
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import get_ec2_security_group_ids_from_names
+from .elb_utils import convert_tg_name_to_arn
+from .elb_utils import get_elb
+from .elb_utils import get_elb_listener
+from .waiters import get_waiter
+
+
+def _simple_forward_config_arn(config, parent_arn):
+ config = deepcopy(config)
+
+ stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False})
+ # Stickiness options set, non default value
+ if stickiness != {'Enabled': False}:
+ return False
+
+ target_groups = config.pop('TargetGroups', [])
+
+ # non-default config left over, probably invalid
+ if config:
+ return False
+ # Multiple TGS, not simple
+ if len(target_groups) > 1:
+ return False
+
+ if not target_groups:
+ # with no TGs defined, but an ARN set, this is one of the minimum possible configs
+ return parent_arn or False
+
+ target_group = target_groups[0]
+ # We don't care about the weight with a single TG
+ target_group.pop('Weight', None)
+
+ target_group_arn = target_group.pop('TargetGroupArn', None)
+
+ # non-default config left over
+ if target_group:
+ return False
+
+ # We didn't find an ARN
+ if not (target_group_arn or parent_arn):
+ return False
+
+ # Only one
+ if not parent_arn:
+ return target_group_arn
+ if not target_group_arn:
+ return parent_arn
+
+ if parent_arn != target_group_arn:
+ return False
+
+ return target_group_arn
+
+
+# ForwardConfig may be optional if we've got a single TargetGroupArn entry
+def _prune_ForwardConfig(action):
+ """
+ Drops a redundant ForwardConfig where TargetGroupARN has already been set.
+ (So we can perform comparisons)
+ """
+ if action.get('Type', "") != 'forward':
+ return action
+ if "ForwardConfig" not in action:
+ return action
+
+ parent_arn = action.get('TargetGroupArn', None)
+ arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn)
+ if not arn:
+ return action
+
+ # Remove the redundant ForwardConfig
+ newAction = action.copy()
+ del newAction["ForwardConfig"]
+ newAction["TargetGroupArn"] = arn
+ return newAction
+
+
+# remove the client secret if UseExistingClientSecret, because aws won't return it
+# add default values when they are not requested
+def _prune_secret(action):
+ if action['Type'] != 'authenticate-oidc':
+ return action
+
+ if not action['AuthenticateOidcConfig'].get('Scope', False):
+ action['AuthenticateOidcConfig']['Scope'] = 'openid'
+
+ if not action['AuthenticateOidcConfig'].get('SessionTimeout', False):
+ action['AuthenticateOidcConfig']['SessionTimeout'] = 604800
+
+ if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False):
+ action['AuthenticateOidcConfig'].pop('ClientSecret', None)
+
+ return action
+
+
+# while AWS api also won't return UseExistingClientSecret key
+# it must be added, because it's requested and compared
+def _append_use_existing_client_secretn(action):
+ if action['Type'] != 'authenticate-oidc':
+ return action
+
+ action['AuthenticateOidcConfig']['UseExistingClientSecret'] = True
+
+ return action
+
+
+def _sort_actions(actions):
+ return sorted(actions, key=lambda x: x.get('Order', 0))
+
+
+class ElasticLoadBalancerV2(object):
+
+ def __init__(self, connection, module):
+
+ self.connection = connection
+ self.module = module
+ self.changed = False
+ self.new_load_balancer = False
+ self.scheme = module.params.get("scheme")
+ self.name = module.params.get("name")
+ self.subnet_mappings = module.params.get("subnet_mappings")
+ self.subnets = module.params.get("subnets")
+ self.deletion_protection = module.params.get("deletion_protection")
+ self.elb_ip_addr_type = module.params.get("ip_address_type")
+ self.wait = module.params.get("wait")
+
+ if module.params.get("tags") is not None:
+ self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
+ else:
+ self.tags = None
+
+ self.purge_tags = module.params.get("purge_tags")
+
+ self.elb = get_elb(connection, module, self.name)
+ if self.elb is not None:
+ self.elb_attributes = self.get_elb_attributes()
+ self.elb_ip_addr_type = self.get_elb_ip_address_type()
+ self.elb['tags'] = self.get_elb_tags()
+ else:
+ self.elb_attributes = None
+
+ def wait_for_ip_type(self, elb_arn, ip_type):
+ """
+        Wait for load balancer to reach the requested IP address type
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ waiter_names = {
+ 'ipv4': 'load_balancer_ip_address_type_ipv4',
+ 'dualstack': 'load_balancer_ip_address_type_dualstack',
+ }
+ if ip_type not in waiter_names:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, waiter_names.get(ip_type))
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def wait_for_status(self, elb_arn):
+ """
+ Wait for load balancer to reach 'active' status
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, 'load_balancer_available')
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def wait_for_deletion(self, elb_arn):
+ """
+        Wait for load balancer to be deleted
+
+ :param elb_arn: The load balancer ARN
+ :return:
+ """
+
+ if not self.wait:
+ return
+
+ try:
+ waiter = get_waiter(self.connection, 'load_balancers_deleted')
+ waiter.wait(LoadBalancerArns=[elb_arn])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def get_elb_attributes(self):
+ """
+ Get load balancer attributes
+
+ :return:
+ """
+
+ try:
+ attr_list = AWSRetry.jittered_backoff()(
+ self.connection.describe_load_balancer_attributes
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
+
+ elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ # Replace '.' with '_' in attribute key names to make it more Ansibley
+ return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
+
+ def get_elb_ip_address_type(self):
+ """
+ Retrieve load balancer ip address type using describe_load_balancers
+
+ :return:
+ """
+
+ return self.elb.get('IpAddressType', None)
+
+ def update_elb_attributes(self):
+ """
+ Update the elb_attributes parameter
+ :return:
+ """
+ self.elb_attributes = self.get_elb_attributes()
+
+ def get_elb_tags(self):
+ """
+ Get load balancer tags
+
+ :return:
+ """
+
+ try:
+ return AWSRetry.jittered_backoff()(
+ self.connection.describe_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ def delete_tags(self, tags_to_delete):
+ """
+ Delete elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.remove_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def modify_tags(self):
+ """
+ Modify elb tags
+
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.add_tags
+ )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def delete(self):
+ """
+ Delete elb
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.delete_load_balancer
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.wait_for_deletion(self.elb['LoadBalancerArn'])
+
+ self.changed = True
+
+ def compare_subnets(self):
+ """
+ Compare user subnets with current ELB subnets
+
+ :return: bool True if they match otherwise False
+ """
+
+ subnet_mapping_id_list = []
+ subnet_mappings = []
+
+ # Check if we're dealing with subnets or subnet_mappings
+ if self.subnets is not None:
+ # Convert subnets to subnet_mappings format for comparison
+ for subnet in self.subnets:
+ subnet_mappings.append({'SubnetId': subnet})
+
+ if self.subnet_mappings is not None:
+ # Use this directly since we're comparing as a mapping
+ subnet_mappings = self.subnet_mappings
+
+ # Build a subnet_mapping style struture of what's currently
+ # on the load balancer
+ for subnet in self.elb['AvailabilityZones']:
+ this_mapping = {'SubnetId': subnet['SubnetId']}
+ for address in subnet.get('LoadBalancerAddresses', []):
+ if 'AllocationId' in address:
+ this_mapping['AllocationId'] = address['AllocationId']
+ break
+
+ subnet_mapping_id_list.append(this_mapping)
+
+ return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
+
+ def modify_subnets(self):
+ """
+ Modify elb subnets to match module parameters
+ :return:
+ """
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_subnets
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def update(self):
+ """
+ Update the elb from AWS
+ :return:
+ """
+
+ self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
+ self.elb['tags'] = self.get_elb_tags()
+
+ def modify_ip_address_type(self, ip_addr_type):
+ """
+ Modify ELB ip address type
+ :return:
+ """
+ if ip_addr_type is None:
+ return
+ if self.elb_ip_addr_type == ip_addr_type:
+ return
+
+ try:
+ AWSRetry.jittered_backoff()(
+ self.connection.set_ip_address_type
+ )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type)
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+ self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type)
+
+ def _elb_create_params(self):
+ # Required parameters
+ params = dict()
+ params['Name'] = self.name
+ params['Type'] = self.type
+
+ # Other parameters
+ if self.elb_ip_addr_type is not None:
+ params['IpAddressType'] = self.elb_ip_addr_type
+ if self.subnets is not None:
+ params['Subnets'] = self.subnets
+ if self.subnet_mappings is not None:
+ params['SubnetMappings'] = self.subnet_mappings
+ if self.tags:
+ params['Tags'] = self.tags
+ # Scheme isn't supported for GatewayLBs, so we won't add it here, even though we don't
+ # support them yet.
+
+ return params
+
+ def create_elb(self):
+ """
+ Create a load balancer
+ :return:
+ """
+
+ params = self._elb_create_params()
+
+ try:
+ self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
+ self.changed = True
+ self.new_load_balancer = True
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.wait_for_status(self.elb['LoadBalancerArn'])
+
+
class ApplicationLoadBalancer(ElasticLoadBalancerV2):
    """Manages an ELBv2 load balancer of type 'application'."""

    def __init__(self, connection, connection_ec2, module):
        """
        :param connection: boto3 elbv2 connection
        :param connection_ec2: boto3 ec2 connection, used for security group name lookups
        :param module: Ansible module
        """
        super(ApplicationLoadBalancer, self).__init__(connection, module)

        self.connection_ec2 = connection_ec2

        # Ansible module parameters specific to ALBs
        self.type = 'application'
        if module.params.get('security_groups') is not None:
            # Security groups may be given by name; resolve them to IDs.
            try:
                self.security_groups = AWSRetry.jittered_backoff()(
                    get_ec2_security_group_ids_from_names
                )(module.params.get('security_groups'), self.connection_ec2, boto3=True)
            except ValueError as e:
                self.module.fail_json(msg=str(e), exception=traceback.format_exc())
            except (BotoCoreError, ClientError) as e:
                self.module.fail_json_aws(e)
        else:
            self.security_groups = module.params.get('security_groups')
        self.access_logs_enabled = module.params.get("access_logs_enabled")
        self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
        self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
        self.idle_timeout = module.params.get("idle_timeout")
        self.http2 = module.params.get("http2")
        self.http_desync_mitigation_mode = module.params.get("http_desync_mitigation_mode")
        self.http_drop_invalid_header_fields = module.params.get("http_drop_invalid_header_fields")
        self.http_x_amzn_tls_version_and_cipher_suite = module.params.get("http_x_amzn_tls_version_and_cipher_suite")
        self.http_xff_client_port = module.params.get("http_xff_client_port")
        self.waf_fail_open = module.params.get("waf_fail_open")

        if self.elb is not None and self.elb['Type'] != 'application':
            self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")

    def _elb_create_params(self):
        """Extend the common create parameters with ALB-specific ones."""
        params = super()._elb_create_params()

        if self.security_groups is not None:
            params['SecurityGroups'] = self.security_groups
        params['Scheme'] = self.scheme

        return params

    def _attribute_updates(self):
        """
        Build the list of {'Key': ..., 'Value': ...} attribute updates needed
        to bring the ELB's current attributes in line with the module
        parameters.

        Shared by compare_elb_attributes() and modify_elb_attributes() so the
        comparison and the update logic can never drift apart (previously the
        same eleven checks were duplicated in both methods).

        :return: list of attribute update dicts; empty when already in sync
        """
        def as_bool_str(value):
            # AWS stores boolean attributes as the strings 'true'/'false'.
            return str(value).lower()

        # (module parameter, API attribute key, elb_attributes key, normaliser)
        attribute_map = (
            (self.access_logs_enabled, 'access_logs.s3.enabled', 'access_logs_s3_enabled', as_bool_str),
            (self.access_logs_s3_bucket, 'access_logs.s3.bucket', 'access_logs_s3_bucket', str),
            (self.access_logs_s3_prefix, 'access_logs.s3.prefix', 'access_logs_s3_prefix', str),
            (self.deletion_protection, 'deletion_protection.enabled', 'deletion_protection_enabled', as_bool_str),
            (self.idle_timeout, 'idle_timeout.timeout_seconds', 'idle_timeout_timeout_seconds', str),
            (self.http2, 'routing.http2.enabled', 'routing_http2_enabled', as_bool_str),
            (self.http_desync_mitigation_mode, 'routing.http.desync_mitigation_mode', 'routing_http_desync_mitigation_mode', as_bool_str),
            (self.http_drop_invalid_header_fields, 'routing.http.drop_invalid_header_fields.enabled',
             'routing_http_drop_invalid_header_fields_enabled', as_bool_str),
            (self.http_x_amzn_tls_version_and_cipher_suite, 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled',
             'routing_http_x_amzn_tls_version_and_cipher_suite_enabled', as_bool_str),
            (self.http_xff_client_port, 'routing.http.xff_client_port.enabled', 'routing_http_xff_client_port_enabled', as_bool_str),
            (self.waf_fail_open, 'waf.fail_open.enabled', 'waf_fail_open_enabled', as_bool_str),
        )

        update_attributes = []
        for param, api_key, attr_key, normalise in attribute_map:
            if param is None:
                # Parameter not supplied by the user; leave the attribute alone.
                continue
            desired = normalise(param)
            if desired != self.elb_attributes[attr_key]:
                update_attributes.append({'Key': api_key, 'Value': desired})

        return update_attributes

    def compare_elb_attributes(self):
        """
        Compare user attributes with current ELB attributes
        :return: bool True if they match otherwise False
        """
        return not self._attribute_updates()

    def modify_elb_attributes(self):
        """
        Update Application ELB attributes if required

        :return:
        """
        update_attributes = self._attribute_updates()

        if update_attributes:
            try:
                AWSRetry.jittered_backoff()(
                    self.connection.modify_load_balancer_attributes
                )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
                self.changed = True
            except (BotoCoreError, ClientError) as e:
                # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
                if self.new_load_balancer:
                    AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
                self.module.fail_json_aws(e)

    def compare_security_groups(self):
        """
        Compare user security groups with current ELB security groups

        :return: bool True if they match otherwise False
        """

        if set(self.elb['SecurityGroups']) != set(self.security_groups):
            return False
        else:
            return True

    def modify_security_groups(self):
        """
        Modify elb security groups to match module parameters
        :return:
        """

        try:
            AWSRetry.jittered_backoff()(
                self.connection.set_security_groups
            )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

        self.changed = True
+
+
class NetworkLoadBalancer(ElasticLoadBalancerV2):
    """Manages an ELBv2 load balancer of type 'network'."""

    def __init__(self, connection, connection_ec2, module):
        """
        :param connection: boto3 elbv2 connection
        :param connection_ec2: boto3 ec2 connection
        :param module: Ansible module
        """
        super(NetworkLoadBalancer, self).__init__(connection, module)

        self.connection_ec2 = connection_ec2

        # Ansible module parameters specific to NLBs
        self.type = 'network'
        self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')

        if self.elb is not None and self.elb['Type'] != 'network':
            self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")

    def _elb_create_params(self):
        """Extend the common create parameters with the NLB scheme."""
        params = super()._elb_create_params()
        params['Scheme'] = self.scheme
        return params

    def modify_elb_attributes(self):
        """
        Update Network ELB attributes if required

        :return:
        """
        updates = []

        if self.cross_zone_load_balancing is not None:
            desired = str(self.cross_zone_load_balancing).lower()
            if desired != self.elb_attributes['load_balancing_cross_zone_enabled']:
                updates.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': desired})
        if self.deletion_protection is not None:
            desired = str(self.deletion_protection).lower()
            if desired != self.elb_attributes['deletion_protection_enabled']:
                updates.append({'Key': 'deletion_protection.enabled', 'Value': desired})

        if not updates:
            return

        try:
            AWSRetry.jittered_backoff()(
                self.connection.modify_load_balancer_attributes
            )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=updates)
            self.changed = True
        except (BotoCoreError, ClientError) as e:
            # Roll back a load balancer created in this task so a failed
            # attribute update doesn't leave a half-configured ELB behind.
            if self.new_load_balancer:
                AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
            self.module.fail_json_aws(e)

    def modify_subnets(self):
        """
        Modify elb subnets to match module parameters (unsupported for NLB)
        :return:
        """

        self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
+
+
class ELBListeners(object):
    """Reconciles the listeners requested via module parameters against the
    listeners currently configured on a load balancer."""

    def __init__(self, connection, module, elb_arn):
        """
        :param connection: boto3 elbv2 connection
        :param module: Ansible module
        :param elb_arn: ARN of the load balancer whose listeners are managed
        """
        self.connection = connection
        self.module = module
        self.elb_arn = elb_arn
        listeners = module.params.get("listeners")
        if listeners is not None:
            # Remove suboption argspec defaults of None from each listener
            listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
        self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
        self.current_listeners = self._get_elb_listeners()
        self.purge_listeners = module.params.get("purge_listeners")
        self.changed = False

    def update(self):
        """
        Update the listeners for the ELB

        Re-fetches the current listeners from AWS into self.current_listeners.

        :return:
        """
        self.current_listeners = self._get_elb_listeners()

    def _get_elb_listeners(self):
        """
        Get ELB listeners

        Paginates through describe_listeners for the full listener list.

        :return: list of listener dicts
        """

        try:
            listener_paginator = self.connection.get_paginator('describe_listeners')
            return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

    def _ensure_listeners_default_action_has_arn(self, listeners):
        """
        If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and
        replace the name.

        :param listeners: a list of listener dicts
        :return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed.
        """

        if not listeners:
            listeners = []

        fixed_listeners = []
        for listener in listeners:
            fixed_actions = []
            for action in listener['DefaultActions']:
                if 'TargetGroupName' in action:
                    # NOTE: mutates the action dict in place (name -> ARN).
                    action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
                                                                      self.module,
                                                                      action['TargetGroupName'])
                    del action['TargetGroupName']
                fixed_actions.append(action)
            listener['DefaultActions'] = fixed_actions
            fixed_listeners.append(listener)

        return fixed_listeners

    def compare_listeners(self):
        """
        Diff the requested listeners against the current ones, matching on Port.

        :return: tuple (listeners_to_add, listeners_to_modify, listeners_to_delete)
        """
        listeners_to_modify = []
        listeners_to_delete = []
        listeners_to_add = deepcopy(self.listeners)

        # Check each current listener port to see if it's been passed to the module
        for current_listener in self.current_listeners:
            current_listener_passed_to_module = False
            for new_listener in self.listeners[:]:
                # NOTE: mutates the requested listener in place so Port compares as int.
                new_listener['Port'] = int(new_listener['Port'])
                if current_listener['Port'] == new_listener['Port']:
                    current_listener_passed_to_module = True
                    # Remove what we match so that what is left can be marked as 'to be added'
                    listeners_to_add.remove(new_listener)
                    modified_listener = self._compare_listener(current_listener, new_listener)
                    if modified_listener:
                        modified_listener['Port'] = current_listener['Port']
                        modified_listener['ListenerArn'] = current_listener['ListenerArn']
                        listeners_to_modify.append(modified_listener)
                    break

            # If the current listener was not matched against passed listeners and purge is True, mark for removal
            if not current_listener_passed_to_module and self.purge_listeners:
                listeners_to_delete.append(current_listener['ListenerArn'])

        return listeners_to_add, listeners_to_modify, listeners_to_delete

    def _compare_listener(self, current_listener, new_listener):
        """
        Compare two listeners.

        :param current_listener: listener dict as returned by AWS
        :param new_listener: listener dict built from module parameters
        :return: dict of the attributes that differ, or None when identical
        """

        modified_listener = {}

        # Port
        if current_listener['Port'] != new_listener['Port']:
            modified_listener['Port'] = new_listener['Port']

        # Protocol
        if current_listener['Protocol'] != new_listener['Protocol']:
            modified_listener['Protocol'] = new_listener['Protocol']

        # If Protocol is HTTPS, check additional attributes
        if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
            # Cert
            if current_listener['SslPolicy'] != new_listener['SslPolicy']:
                modified_listener['SslPolicy'] = new_listener['SslPolicy']
            # Only the first certificate is compared.
            if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
                modified_listener['Certificates'] = []
                modified_listener['Certificates'].append({})
                modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
        elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
            # Switching to HTTPS: SslPolicy and Certificates must be set.
            modified_listener['SslPolicy'] = new_listener['SslPolicy']
            modified_listener['Certificates'] = []
            modified_listener['Certificates'].append({})
            modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']

        # Default action

        # If the lengths of the actions are the same, we'll have to verify that the
        # contents of those actions are the same
        if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
            current_actions_sorted = _sort_actions(current_listener['DefaultActions'])
            new_actions_sorted = _sort_actions(new_listener['DefaultActions'])

            # _prune_secret/_prune_ForwardConfig are helpers defined elsewhere in
            # this file; presumably they strip fields AWS adds that the user
            # never supplies, so the comparison is apples-to-apples.
            new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]

            if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
                modified_listener['DefaultActions'] = new_listener['DefaultActions']
        # If the action lengths are different, then replace with the new actions
        else:
            modified_listener['DefaultActions'] = new_listener['DefaultActions']

        if modified_listener:
            return modified_listener
        else:
            return None
+
+
class ELBListener(object):
    """A single listener on an ELBv2 load balancer."""

    def __init__(self, connection, module, listener, elb_arn):
        """
        :param connection: boto3 elbv2 connection
        :param module: Ansible module
        :param listener: listener dict (or a listener ARN when used with delete())
        :param elb_arn: ARN of the load balancer the listener belongs to
        """
        self.connection = connection
        self.module = module
        self.listener = listener
        self.elb_arn = elb_arn

    def add(self):
        """Create this listener on the load balancer."""
        # Rules is not a valid parameter for create_listener
        self.listener.pop('Rules', None)
        try:
            AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

    def modify(self):
        """Modify this listener in place (self.listener carries ListenerArn)."""
        # Rules is not a valid parameter for modify_listener
        self.listener.pop('Rules', None)
        try:
            AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

    def delete(self):
        """Delete this listener (self.listener holds the listener ARN here)."""
        try:
            AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)
+
+
class ELBListenerRules(object):
    """Reconciles the rules requested for one listener against the rules
    currently configured on that listener."""

    def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
        """
        :param connection: boto3 elbv2 connection
        :param module: Ansible module
        :param elb_arn: ARN of the load balancer
        :param listener_rules: list of rule dicts from module parameters
        :param listener_port: port identifying which listener the rules belong to
        """
        self.connection = connection
        self.module = module
        self.elb_arn = elb_arn
        self.rules = self._ensure_rules_action_has_arn(listener_rules)
        self.changed = False

        # Get listener based on port so we can use ARN
        self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
        # NOTE(review): this lookup happens before the 'ListenerArn' existence
        # check below — it would raise KeyError for a listener without an ARN;
        # presumably get_elb_listener always returns one. Confirm against callers.
        self.listener_arn = self.current_listener['ListenerArn']
        self.rules_to_add = deepcopy(self.rules)
        self.rules_to_modify = []
        self.rules_to_delete = []

        # If the listener exists (i.e. has an ARN) get rules for the listener
        if 'ListenerArn' in self.current_listener:
            self.current_rules = self._get_elb_listener_rules()
        else:
            self.current_rules = []

    def _ensure_rules_action_has_arn(self, rules):
        """
        If a rule Action has been passed with a Target Group Name instead of ARN, lookup the ARN and
        replace the name.

        :param rules: a list of rule dicts
        :return: the same list of dicts ensuring that each rule Actions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed.
        """

        fixed_rules = []
        for rule in rules:
            fixed_actions = []
            for action in rule['Actions']:
                if 'TargetGroupName' in action:
                    # NOTE: mutates the action dict in place (name -> ARN).
                    action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
                    del action['TargetGroupName']
                fixed_actions.append(action)
            rule['Actions'] = fixed_actions
            fixed_rules.append(rule)

        return fixed_rules

    def _get_elb_listener_rules(self):
        # Fetch all rules currently attached to this listener.
        try:
            return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

    def _compare_condition(self, current_conditions, condition):
        """
        Check whether a requested condition is already present in the current conditions.

        :param current_conditions: list of condition dicts from AWS
        :param condition: a single requested condition dict
        :return: bool True when an equivalent condition exists
        """

        condition_found = False

        for current_condition in current_conditions:
            # host-header: current_condition includes both HostHeaderConfig AND Values while
            # condition can be defined with either HostHeaderConfig OR Values. Only use
            # HostHeaderConfig['Values'] comparison if both conditions includes HostHeaderConfig.
            if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'):
                if (current_condition['Field'] == condition['Field'] and
                        sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])):
                    condition_found = True
                    break
            elif current_condition.get('HttpHeaderConfig'):
                if (current_condition['Field'] == condition['Field'] and
                        sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and
                        current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']):
                    condition_found = True
                    break
            elif current_condition.get('HttpRequestMethodConfig'):
                if (current_condition['Field'] == condition['Field'] and
                        sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])):
                    condition_found = True
                    break
            # path-pattern: current_condition includes both PathPatternConfig AND Values while
            # condition can be defined with either PathPatternConfig OR Values. Only use
            # PathPatternConfig['Values'] comparison if both conditions includes PathPatternConfig.
            elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'):
                if (current_condition['Field'] == condition['Field'] and
                        sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])):
                    condition_found = True
                    break
            elif current_condition.get('QueryStringConfig'):
                # QueryString Values is not sorted as it is the only list of dicts (not strings).
                if (current_condition['Field'] == condition['Field'] and
                        current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']):
                    condition_found = True
                    break
            elif current_condition.get('SourceIpConfig'):
                if (current_condition['Field'] == condition['Field'] and
                        sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])):
                    condition_found = True
                    break
            # Not all fields are required to have Values list nested within a *Config dict
            # e.g. fields host-header/path-pattern can directly list Values
            elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
                condition_found = True
                break

        return condition_found

    def _compare_rule(self, current_rule, new_rule):
        """
        Compare one current rule against one requested rule.

        :param current_rule: rule dict as returned by AWS
        :param new_rule: rule dict built from module parameters
        :return: dict of the attributes that differ (possibly empty)
        """

        modified_rule = {}

        # Priority
        if int(current_rule['Priority']) != int(new_rule['Priority']):
            modified_rule['Priority'] = new_rule['Priority']

        # Actions

        # If the lengths of the actions are the same, we'll have to verify that the
        # contents of those actions are the same
        if len(current_rule['Actions']) == len(new_rule['Actions']):
            # if actions have just one element, compare the contents and then update if
            # they're different
            current_actions_sorted = _sort_actions(current_rule['Actions'])
            new_actions_sorted = _sort_actions(new_rule['Actions'])

            # Helpers defined elsewhere in this file; presumably they normalise
            # secret/AWS-populated fields before comparing — confirm at their
            # definitions. (The trailing 'n' in the name below matches the
            # helper's actual spelling.)
            new_current_actions_sorted = [_append_use_existing_client_secretn(i) for i in current_actions_sorted]
            new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]

            if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
                modified_rule['Actions'] = new_rule['Actions']
        # If the action lengths are different, then replace with the new actions
        else:
            modified_rule['Actions'] = new_rule['Actions']

        # Conditions
        modified_conditions = []
        for condition in new_rule['Conditions']:
            if not self._compare_condition(current_rule['Conditions'], condition):
                modified_conditions.append(condition)

        if modified_conditions:
            modified_rule['Conditions'] = modified_conditions

        return modified_rule

    def compare_rules(self):
        """
        Diff the requested rules against the current rules, matching on Priority.

        :return: tuple (rules_to_add, rules_to_modify, rules_to_delete)
        """

        rules_to_modify = []
        rules_to_delete = []
        rules_to_add = deepcopy(self.rules)

        for current_rule in self.current_rules:
            current_rule_passed_to_module = False
            for new_rule in self.rules[:]:
                # AWS reports Priority as a string; requested rules carry ints.
                if current_rule['Priority'] == str(new_rule['Priority']):
                    current_rule_passed_to_module = True
                    # Remove what we match so that what is left can be marked as 'to be added'
                    rules_to_add.remove(new_rule)
                    modified_rule = self._compare_rule(current_rule, new_rule)
                    if modified_rule:
                        modified_rule['Priority'] = int(current_rule['Priority'])
                        modified_rule['RuleArn'] = current_rule['RuleArn']
                        modified_rule['Actions'] = new_rule['Actions']
                        modified_rule['Conditions'] = new_rule['Conditions']
                        rules_to_modify.append(modified_rule)
                    break

            # If the current rule was not matched against passed rules, mark for removal
            if not current_rule_passed_to_module and not current_rule['IsDefault']:
                rules_to_delete.append(current_rule['RuleArn'])

        return rules_to_add, rules_to_modify, rules_to_delete
+
+
class ELBListenerRule(object):
    """A single rule attached to an ELBv2 listener."""

    def __init__(self, connection, module, rule, listener_arn):
        """
        :param connection: boto3 elbv2 connection
        :param module: Ansible module
        :param rule: dict describing the rule
        :param listener_arn: ARN of the listener the rule belongs to
        """
        self.connection = connection
        self.module = module
        self.rule = rule
        self.listener_arn = listener_arn
        self.changed = False

    def create(self):
        """
        Create this rule on the listener.

        :return: None
        """
        # create_rule needs the listener ARN and an integer priority.
        self.rule['ListenerArn'] = self.listener_arn
        self.rule['Priority'] = int(self.rule['Priority'])
        try:
            AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

        self.changed = True

    def modify(self):
        """
        Modify this rule in place.

        :return: None
        """
        # Priority is not a valid parameter for modify_rule.
        del self.rule['Priority']
        try:
            AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

        self.changed = True

    def delete(self):
        """
        Delete this rule.

        :return: None
        """
        try:
            AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e)

        self.changed = True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
new file mode 100644
index 000000000..6ebed23ba
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+
+from .ec2 import AWSRetry
+from .core import is_boto3_error_code
+from .core import parse_aws_arn
+
+
def get_aws_account_id(module):
    """ Given an AnsibleAWSModule instance, get the active AWS account ID
    """
    account_id, _partition = get_aws_account_info(module)
    return account_id
+
+
def get_aws_account_info(module):
    """Given an AnsibleAWSModule instance, return the account information
    (account id and partition) we are currently working on

    get_account_info tries too find out the account that we are working
    on.  It's not guaranteed that this will be easy so we try in
    several different ways.  Giving either IAM or STS privileges to
    the account should be enough to permit this.

    Tries:
        - sts:GetCallerIdentity
        - iam:GetUser
        - sts:DecodeAuthorizationMessage

    :param module: AnsibleAWSModule instance (used for client creation and failure reporting)
    :return: tuple (account_id, partition) as native strings; fails the module otherwise
    """
    account_id = None
    partition = None
    # First attempt: sts:GetCallerIdentity — cheapest and most widely permitted.
    try:
        sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
        caller_id = sts_client.get_caller_identity(aws_retry=True)
        account_id = caller_id.get('Account')
        # ARN format is arn:<partition>:...; field 1 is the partition.
        partition = caller_id.get('Arn').split(':')[1]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
        # Second attempt: iam:GetUser — the user's own ARN also carries
        # both the partition and the account id.
        try:
            iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
            _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
        except is_boto3_error_code('AccessDenied') as e:
            # Last resort: the AccessDenied message itself usually embeds the
            # caller's ARN; parse the account id and partition out of it.
            try:
                except_msg = to_native(e.message)
            except AttributeError:
                except_msg = to_native(e)
            result = parse_aws_arn(except_msg)
            if result is None or result['service'] != 'iam':
                module.fail_json_aws(
                    e,
                    msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
                )
            account_id = result.get('account_id')
            partition = result.get('partition')
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
            )

    # All strategies exhausted without both pieces of information.
    if account_id is None or partition is None:
        module.fail_json(
            msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
        )

    return (to_native(account_id), to_native(partition))
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
new file mode 100644
index 000000000..7d4ba717f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -0,0 +1,447 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+                              mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+module-created client. To add retries to a client, create a client:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import wraps
+import logging
+import os
+import re
+import traceback
+
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils._text import to_native
+
+from .botocore import HAS_BOTO3
+from .botocore import boto3_conn
+from .botocore import get_aws_connection_info
+from .botocore import get_aws_region
+from .botocore import gather_sdk_versions
+
+from .version import LooseVersion
+
+# Currently only AnsibleAWSModule. However we have a lot of Copy and Paste code
+# for Inventory and Lookup modules which we should refactor
+
+
class AnsibleAWSModule(object):
    """An ansible module class for AWS modules

    AnsibleAWSModule provides a class for building modules which
    connect to Amazon Web Services.  The interface is currently more
    restricted than the basic module class with the aim that later the
    basic module class can be reduced.  If you find that any key
    feature is missing please contact the author/Ansible AWS team
    (available on #ansible-aws on IRC) to request the additional
    features needed.
    """
    # Behaviour toggles consumed (popped) by __init__ before remaining
    # kwargs are handed to the wrapped Ansible module:
    #   default_args - merge the common AWS argument spec into argument_spec
    #   check_boto3  - fail early when botocore/boto3 are missing
    #   auto_retry   - does not appear to be read in this file — TODO confirm use
    #   module_class - class used to construct the underlying Ansible module
    default_settings = {
        "default_args": True,
        "check_boto3": True,
        "auto_retry": True,
        "module_class": AnsibleModule
    }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+ argument_spec_full = aws_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"]:
+ if not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore and boto3'))
+ if not self.botocore_at_least('1.21.0'):
+ self.warn('botocore < 1.21.0 is not supported or tested.'
+ ' Some features may not work.')
+ if not self.boto3_at_least("1.18.0"):
+ self.warn('boto3 < 1.18.0 is not supported or tested.'
+ ' Some features may not work.')
+
+ deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY',
+ 'EC2_URL', 'S3_URL'}
+ if deprecated_vars.intersection(set(os.environ.keys())):
+ self._module.deprecate(
+ "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', "
+ "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment "
+ "variables has been deprecated. "
+ "These variables are currently used for all AWS services which can "
+ "cause confusion. We recomend using the relevant module "
+ "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', "
+ "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' "
+ "environment variables can be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ if 'AWS_SECURITY_TOKEN' in os.environ.keys():
+ self._module.deprecate(
+ "Support for the 'AWS_SECURITY_TOKEN' environment variable "
+ "has been deprecated. This variable was based on the original "
+ "boto SDK, support for which has now been dropped. "
+ "We recommend using the 'session_token' module parameter "
+ "or alternatively the 'AWS_SESSION_TOKEN' environment variable "
+ "can be used instead.",
+ date='2024-12-01', collection_name='amazon.aws',
+ )
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
    @property
    def params(self):
        # Expose the wrapped module's parameters directly.
        return self._module.params
+
    def _get_resource_action_list(self):
        """Parse the captured botocore endpoint log into a de-duplicated
        list of '<service>:<Operation>' strings."""
        actions = []
        for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
            ln = ln.strip()
            if not ln:
                continue
            # Lines of interest contain e.g. "OperationModel(name=DescribeInstances)".
            found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
            if found_operational_request:
                # Strip the 'OperationModel(name=' prefix (20 chars) and the ')'.
                operation_request = found_operational_request.group(0)[20:-1]
                # Service name comes from the endpoint host, e.g. "https://ec2.".
                resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
                actions.append("{0}:{1}".format(resource, operation_request))
        # Order is not preserved; only the unique set of actions is returned.
        return list(set(actions))
+
    def exit_json(self, *args, **kwargs):
        # Attach the list of AWS actions performed when endpoint-log
        # debugging is enabled, then delegate to the wrapped module.
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.exit_json(*args, **kwargs)
+
    def fail_json(self, *args, **kwargs):
        # Mirror exit_json: include observed AWS actions on failure too.
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.fail_json(*args, **kwargs)
+
    # Thin pass-throughs to the wrapped AnsibleModule.
    def debug(self, *args, **kwargs):
        return self._module.debug(*args, **kwargs)

    def warn(self, *args, **kwargs):
        return self._module.warn(*args, **kwargs)

    def deprecate(self, *args, **kwargs):
        return self._module.deprecate(*args, **kwargs)

    def boolean(self, *args, **kwargs):
        return self._module.boolean(*args, **kwargs)

    def md5(self, *args, **kwargs):
        return self._module.md5(*args, **kwargs)
+
    def client(self, service, retry_decorator=None):
        """Return a boto3 client for *service* built from the module's
        connection parameters.  When *retry_decorator* is given, the client
        is wrapped so individual calls can opt in with aws_retry=True."""
        region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        conn = boto3_conn(self, conn_type='client', resource=service,
                          region=region, endpoint=endpoint_url, **aws_connect_kwargs)
        return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
    def resource(self, service):
        """Return a boto3 resource for *service* built from the module's
        connection parameters."""
        region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        return boto3_conn(self, conn_type='resource', resource=service,
                          region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+
    @property
    def region(self):
        # Resolved AWS region for this module (second argument to
        # get_aws_region is its boolean flag; see botocore helper).
        return get_aws_region(self, True)
+
    def fail_json_aws(self, exception, msg=None, **kwargs):
        """call fail_json with processed exception

        function for converting exceptions thrown by AWS SDK modules,
        botocore, boto3 and boto, into nice error messages.
        """
        # Capture the traceback of the exception currently being handled.
        last_traceback = traceback.format_exc()

        # to_native is trusted to handle exceptions that str() could
        # convert to text.
        try:
            except_msg = to_native(exception.message)
        except AttributeError:
            except_msg = to_native(exception)

        if msg is not None:
            message = '{0}: {1}'.format(msg, except_msg)
        else:
            message = except_msg

        # botocore ClientError carries the raw API response; other
        # exceptions do not.
        try:
            response = exception.response
        except AttributeError:
            response = None

        failure = dict(
            msg=message,
            exception=last_traceback,
            **self._gather_versions()
        )

        failure.update(kwargs)

        # Merged last, so snake_cased response keys override caller kwargs.
        if response is not None:
            failure.update(**camel_dict_to_snake_dict(response))

        self.fail_json(**failure)
+
    def _gather_versions(self):
        """Gather AWS SDK (boto3 and botocore) dependency versions

        Returns {'boto3_version': str, 'botocore_version': str}
        Returns {} if either is not installed
        """
        # Delegates to the shared helper in module_utils.botocore.
        return gather_sdk_versions()
+
    def require_boto3_at_least(self, desired, **kwargs):
        """Check if the available boto3 version is greater than or equal to a desired version.

        calls fail_json() when the boto3 version is less than the desired
        version

        Usage:
            module.require_boto3_at_least("1.2.3", reason="to update tags")
            module.require_boto3_at_least("1.1.1")

        :param desired the minimum desired version
        :param reason why the version is required (optional)
        """
        if not self.boto3_at_least(desired):
            # Extra kwargs (e.g. reason=...) are forwarded to
            # missing_required_lib for the failure message.
            self._module.fail_json(
                msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs),
                **self._gather_versions()
            )
+
    def boto3_at_least(self, desired):
        """Check if the available boto3 version is greater than or equal to a desired version.

        Usage:
            if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
                # conditionally fail on old boto3 versions if a specific feature is not supported
                module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
        """
        # NOTE(review): _gather_versions() returns {} when the SDK is not
        # installed, so this raises KeyError in that case — callers are
        # expected to have passed the check_boto3 gate first; confirm.
        existing = self._gather_versions()
        return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
    def require_botocore_at_least(self, desired, **kwargs):
        """Check if the available botocore version is greater than or equal to a desired version.

        calls fail_json() when the botocore version is less than the desired
        version

        Usage:
            module.require_botocore_at_least("1.2.3", reason="to update tags")
            module.require_botocore_at_least("1.1.1")

        :param desired the minimum desired version
        :param reason why the version is required (optional)
        """
        if not self.botocore_at_least(desired):
            # Extra kwargs (e.g. reason=...) are forwarded to
            # missing_required_lib for the failure message.
            self._module.fail_json(
                msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs),
                **self._gather_versions()
            )
+
    def botocore_at_least(self, desired):
        """Check if the available botocore version is greater than or equal to a desired version.

        Usage:
            if not module.botocore_at_least('1.2.3'):
                module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
            if not module.botocore_at_least('1.5.3'):
                module.warn('Botocore did not include waiters for Service X before 1.5.3. '
                            'To wait until Service X resources are fully available, update botocore.')
        """
        # NOTE(review): like boto3_at_least, this raises KeyError when
        # botocore is absent (versions dict is empty); confirm callers gate
        # on check_boto3 first.
        existing = self._gather_versions()
        return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
class _RetryingBotoClientWrapper(object):
    """Proxy around a boto3 client that adds opt-in retries.

    Callable attributes are wrapped so that passing aws_retry=True at call
    time routes the call through the retry decorator supplied at
    construction; without it the raw client method is invoked.
    """
    # Methods that return helper objects rather than making API calls;
    # these are always passed through unwrapped.
    __never_wait = (
        'get_paginator', 'can_paginate',
        'get_waiter', 'generate_presigned_url',
    )

    def __init__(self, client, retry):
        self.client = client
        self.retry = retry

    def _create_optional_retry_wrapper_function(self, unwrapped):
        # The retrying variant is built once per method, not per call.
        retrying_wrapper = self.retry(unwrapped)

        @wraps(unwrapped)
        def deciding_wrapper(aws_retry=False, *args, **kwargs):
            # NOTE(review): because aws_retry precedes *args, a positional
            # argument would bind to aws_retry.  boto3 operations take
            # keyword arguments only, so this is safe in practice — confirm.
            if aws_retry:
                return retrying_wrapper(*args, **kwargs)
            else:
                return unwrapped(*args, **kwargs)
        return deciding_wrapper

    def __getattr__(self, name):
        unwrapped = getattr(self.client, name)
        if name in self.__never_wait:
            return unwrapped
        elif callable(unwrapped):
            wrapped = self._create_optional_retry_wrapper_function(unwrapped)
            # Cache on the instance so __getattr__ only fires once per name.
            setattr(self, name, wrapped)
            return wrapped
        else:
            return unwrapped
+
+
def _aws_common_argument_spec():
    """
    This does not include 'region' as some AWS APIs don't require a
    region. However, it's not recommended to do this as it means module_defaults
    can't include the region parameter.

    Returns a dict suitable for merging into an AnsibleModule argument_spec.
    """
    return dict(
        access_key=dict(
            aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'],
            deprecated_aliases=[
                dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'),
            ],
            # Access key IDs are not secret; suppressing the no_log heuristic.
            no_log=False,
        ),
        secret_key=dict(
            aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'],
            deprecated_aliases=[
                dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'),
            ],
            no_log=True,
        ),
        session_token=dict(
            aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'],
            deprecated_aliases=[
                dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'),
                dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'),
                dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'),
            ],
            no_log=True,
        ),
        profile=dict(
            aliases=['aws_profile'],
        ),

        endpoint_url=dict(
            aliases=['aws_endpoint_url', 'ec2_url', 's3_url'],
            deprecated_aliases=[
                dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'),
                dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'),
            ],
            # Legacy EC2_URL/S3_URL env vars still honoured as fallbacks.
            fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']),
        ),
        validate_certs=dict(
            type='bool',
            default=True,
        ),
        aws_ca_bundle=dict(
            type='path',
            fallback=(env_fallback, ['AWS_CA_BUNDLE']),
        ),
        aws_config=dict(
            type='dict',
        ),
        debug_botocore_endpoint_logs=dict(
            type='bool',
            default=False,
            fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']),
        ),
    )
+
+
def aws_argument_spec():
    """
    Returns a dictionary containing the argument_spec common to all AWS modules.

    This is the common spec plus the 'region' parameter.
    """
    region_spec = dict(
        region=dict(
            aliases=['aws_region', 'ec2_region'],
            deprecated_aliases=[
                dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'),
            ],
            fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']),
        ),
    )
    spec = _aws_common_argument_spec()
    spec.update(region_spec)
    return spec
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
new file mode 100644
index 000000000..4aeabd5f2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -0,0 +1,179 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import cmp_to_key
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import binary_type
+from ansible.module_utils.six import string_types
+
+
def _hashable_policy(policy, policy_list):
    """
    Takes a policy and returns a list, the contents of which are all hashable and sorted.
    Example input policy:
    {'Version': '2012-10-17',
     'Statement': [{'Action': 's3:PutObjectAcl',
                    'Sid': 'AddCannedAcl2',
                    'Resource': 'arn:aws:s3:::test_policy/*',
                    'Effect': 'Allow',
                    'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                    }]}
    Returned value:
    [('Statement',  ((('Action', ('s3:PutObjectAcl',)),
                      ('Effect', ('Allow',)),
                      ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
                      ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))),
     ('Version', ('2012-10-17',)))]

    *policy_list* is the accumulator for the current recursion level and
    should be passed as [] by callers.
    """
    # Amazon will automatically convert bool and int to strings for us
    if isinstance(policy, bool):
        return tuple([str(policy).lower()])
    elif isinstance(policy, int):
        return tuple([str(policy)])

    if isinstance(policy, list):
        for each in policy:
            tupleified = _hashable_policy(each, [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append(tupleified)
    elif isinstance(policy, string_types) or isinstance(policy, binary_type):
        policy = to_text(policy)
        # convert root account ARNs to just account IDs
        # NOTE(review): only matches the 'aws' partition prefix; ARNs from
        # other partitions (e.g. aws-us-gov) are left untouched — confirm
        # whether that is intentional.
        if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
            policy = policy.split(':')[4]
        return [policy]
    elif isinstance(policy, dict):
        # Sort keys so key order in the input never affects the result.
        sorted_keys = list(policy.keys())
        sorted_keys.sort()
        for key in sorted_keys:
            element = policy[key]
            # Special case defined in
            # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
            if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
                element = {"AWS": "*"}
            tupleified = _hashable_policy(element, [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append((key, tupleified))

    # ensure we aren't returning deeply nested structures of length 1
    if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
        policy_list = policy_list[0]
    if isinstance(policy_list, list):
        # _py3cmp keeps the Python 2 "strings sort before tuples" ordering.
        policy_list.sort(key=cmp_to_key(_py3cmp))
    return policy_list
+
+
def _py3cmp(a, b):
    """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3.

    Returns -1/0/1 like the classic cmp(); used via functools.cmp_to_key.
    """
    try:
        if a > b:
            return 1
        elif a < b:
            return -1
        else:
            return 0
    except TypeError as e:
        # check to see if they're tuple-string
        # always say strings are less than tuples (to maintain compatibility with python2)
        # The TypeError message names both offending types, e.g.
        # "'<' not supported between instances of 'str' and 'tuple'";
        # the first-named type is the left operand.
        str_ind = to_text(e).find('str')
        tup_ind = to_text(e).find('tuple')
        if -1 not in (str_ind, tup_ind):
            if str_ind < tup_ind:
                return -1
            elif tup_ind < str_ind:
                return 1
        # Any other mixed-type comparison is a genuine error.
        raise
+
+
def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
    """Compare two IAM policy documents, ignoring ordering.

    Returns True when the policies differ, False when they are
    semantically identical.  When *default_version* is truthy, a missing
    'Version' key on either dict is treated as that value before
    comparison (per the IAM policy-element reference).
    """
    # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
    if default_version:
        normalised = []
        for document in (current_policy, new_policy):
            if isinstance(document, dict):
                # Work on a copy so the caller's dict is never mutated.
                document = document.copy()
                document.setdefault("Version", default_version)
            normalised.append(document)
        current_policy, new_policy = normalised

    return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
+
+
def sort_json_policy_dict(policy_dict):
    """Recursively sort every list inside an IAM JSON policy document.

    Args:
        policy_dict (dict): Dict representing an IAM JSON policy.
    Returns:
        dict: A copy of the policy in which every list (at any depth) is
        sorted, so two policies with identical values in different orders
        compare equal.  Strings sort lexicographically (so '101' < '7');
        dicts inside lists are ordered by their sorted (key, value) pairs,
        since Python 3 cannot compare dicts directly.
    """

    def _normalise(value):
        # Recursively canonicalise a single value: dicts are re-sorted,
        # lists are normalised element-wise and then sorted, everything
        # else is returned unchanged.
        if isinstance(value, dict):
            return sort_json_policy_dict(value)
        if isinstance(value, list):
            return sorted(
                (_normalise(item) for item in value),
                key=lambda x: sorted(x.items()) if isinstance(x, dict) else x,
            )
        return value

    return {key: _normalise(value) for key, value in policy_dict.items()}
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
new file mode 100644
index 000000000..8b5bcb67c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -0,0 +1,390 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import namedtuple
+from time import sleep
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import compare_aws_tags
+from .waiters import get_waiter
+
# Metadata describing how a given RDS client method should be driven:
# which waiter to use, a human-readable description, the resource type it
# acts on and any extra error codes worth retrying.
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
    'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3',
    'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
    'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
    'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
    'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
    'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
    'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance',
    'remove_role_from_db_instance'
]

# Methods operating on DB cluster snapshots (tagging methods appear in
# several lists on purpose; the module params disambiguate the resource).
cluster_snapshot_method_names = [
    'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
    'list_tags_for_resource', 'copy_db_cluster_snapshot'
]

# Methods operating on DB instance snapshots.
instance_snapshot_method_names = [
    'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
    'copy_db_snapshot', 'list_tags_for_resource'
]
+
+
def get_rds_method_attribute(method_name, module):
    '''
    Returns rds attributes of the specified method.

    Parameters:
        method_name (str): RDS method to call
        module: AnsibleAWSModule

    Returns:
        Boto3ClientMethod (dict):
            name (str): Name of method
            waiter (str): Name of waiter associated with given method
            operation_description (str): Description of method
            resource (str): Type of resource this method applies to
                One of ['instance', 'cluster', 'instance_snapshot', 'cluster_snapshot']
            retry_codes (list): List of extra error codes to retry on

    Raises:
        NotImplementedError if wait is True but no waiter can be found for specified method
    '''
    waiter = ''
    # e.g. 'delete_db_cluster' -> 'delete DB cluster'
    readable_op = method_name.replace('_', ' ').replace('db', 'DB')
    resource = ''
    retry_codes = []
    # The resource type is inferred from both the method name and which
    # identifier parameter the calling module declares, since tagging
    # methods appear in several of the whitelists.
    if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
        resource = 'cluster'
        if method_name == 'delete_db_cluster':
            waiter = 'cluster_deleted'
        else:
            waiter = 'cluster_available'
        # Handle retry codes
        if method_name == 'restore_db_cluster_from_snapshot':
            retry_codes = ['InvalidDBClusterSnapshotState']
        else:
            retry_codes = ['InvalidDBClusterState']
    elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
        resource = 'instance'
        if method_name == 'delete_db_instance':
            waiter = 'db_instance_deleted'
        elif method_name == 'stop_db_instance':
            waiter = 'db_instance_stopped'
        elif method_name == 'add_role_to_db_instance':
            waiter = 'role_associated'
        elif method_name == 'remove_role_from_db_instance':
            waiter = 'role_disassociated'
        elif method_name == 'promote_read_replica':
            waiter = 'read_replica_promoted'
        else:
            waiter = 'db_instance_available'
        # Handle retry codes
        if method_name == 'restore_db_instance_from_db_snapshot':
            retry_codes = ['InvalidDBSnapshotState']
        else:
            retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
    elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params:
        resource = 'cluster_snapshot'
        if method_name == 'delete_db_cluster_snapshot':
            waiter = 'db_cluster_snapshot_deleted'
            retry_codes = ['InvalidDBClusterSnapshotState']
        elif method_name == 'create_db_cluster_snapshot':
            waiter = 'db_cluster_snapshot_available'
            retry_codes = ['InvalidDBClusterState']
        else:
            # Tagging
            waiter = 'db_cluster_snapshot_available'
            retry_codes = ['InvalidDBClusterSnapshotState']
    elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params:
        resource = 'instance_snapshot'
        if method_name == 'delete_db_snapshot':
            waiter = 'db_snapshot_deleted'
            retry_codes = ['InvalidDBSnapshotState']
        elif method_name == 'create_db_snapshot':
            waiter = 'db_snapshot_available'
            retry_codes = ['InvalidDBInstanceState']
        else:
            # Tagging
            waiter = 'db_snapshot_available'
            retry_codes = ['InvalidDBSnapshotState']
    else:
        # Unknown method: only an error if the caller intends to wait.
        if module.params.get('wait'):
            raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))

    return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op,
                             resource=resource, retry_codes=retry_codes)
+
+
def get_final_identifier(method_name, module):
    """Return the identifier the resource will have after *method_name*
    runs, taking a pending rename into account when it is applied
    immediately (and not in check mode)."""
    resource = get_rds_method_attribute(method_name, module).resource
    # Map each resource type to its (current, renamed) parameter names.
    identifier_params = {
        'cluster': ('db_cluster_identifier', 'new_db_cluster_identifier'),
        'instance': ('db_instance_identifier', 'new_db_instance_identifier'),
        'instance_snapshot': ('db_snapshot_identifier', None),
        'cluster_snapshot': ('db_cluster_snapshot_identifier', None),
    }
    if resource not in identifier_params:
        raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))

    current_param, renamed_param = identifier_params[resource]
    identifier = module.params[current_param]
    updated_identifier = module.params[renamed_param] if renamed_param else None
    apply_immediately = module.params.get('apply_immediately')
    if not module.check_mode and updated_identifier and apply_immediately:
        identifier = updated_identifier
    return identifier
+
+
def handle_errors(module, exception, method_name, parameters):
    """Translate an RDS API exception into either a module failure or a
    "no change was needed" result.

    Returns the boolean *changed* value; any path that cannot be treated
    as a benign no-op calls module.fail_json_aws and does not return.
    """

    if not isinstance(exception, ClientError):
        module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))

    changed = True
    error_code = exception.response['Error']['Code']
    if (
        method_name in ('modify_db_instance', 'modify_db_cluster') and
        error_code == 'InvalidParameterCombination'
    ):
        # AWS raises this when a modify call contains nothing to change.
        if 'No modifications were requested' in to_text(exception):
            changed = False
        elif 'ModifyDbCluster API' in to_text(exception):
            module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
        # Promoting an instance that is already standalone is a no-op.
        if 'DB Instance is not a read replica' in to_text(exception):
            changed = False
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault':
        # Same idempotency treatment for cluster promotion.
        if 'DB Cluster that is not a read replica' in to_text(exception):
            changed = False
        else:
            module.fail_json_aws(
                exception,
                msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description),
            )
    elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue":
        # Give a clearer hint when the failure is an unsupported engine.
        accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"]
        if parameters.get("Engine") not in accepted_engines:
            module.fail_json_aws(
                exception, msg="DB engine {0} should be one of {1}".format(parameters.get("Engine"), accepted_engines)
            )
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    else:
        module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))

    return changed
+
+
def call_method(client, module, method_name, parameters):
    """Invoke an RDS client method with retries, optional waiting and
    check_mode support.

    Returns a (result, changed) tuple.  In check mode nothing is called
    and changed is reported as True.
    """
    result = {}
    changed = True
    if not module.check_mode:
        wait = module.params.get('wait')
        retry_codes = get_rds_method_attribute(method_name, module).retry_codes
        method = getattr(client, method_name)
        try:
            # Wrap the bound method so transient state errors are retried.
            result = AWSRetry.jittered_backoff(catch_extra_error_codes=retry_codes)(method)(**parameters)
        except (BotoCoreError, ClientError) as e:
            # handle_errors either fails the module or downgrades to "no change".
            changed = handle_errors(module, e, method_name, parameters)

        if wait and changed:
            identifier = get_final_identifier(method_name, module)
            wait_for_status(client, module, identifier, method_name)
    return result, changed
+
+
def wait_for_instance_status(client, module, db_instance_id, waiter_name):
    """Wait until the DB instance reaches the state implied by *waiter_name*,
    tolerating transient DBInstanceNotFound errors caused by renames."""
    def wait(client, db_instance_id, waiter_name):
        try:
            waiter = client.get_waiter(waiter_name)
        except ValueError:
            # using a waiter in module_utils/waiters.py
            waiter = get_waiter(client, waiter_name)
        waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)

    waiter_expected_status = {
        'db_instance_deleted': 'deleted',
        'db_instance_stopped': 'stopped',
    }
    expected_status = waiter_expected_status.get(waiter_name, 'available')
    # NOTE(review): if all 10 attempts end in DBInstanceNotFound the loop
    # exits without failing the module — confirm this is intentional.
    for _wait_attempts in range(0, 10):
        try:
            wait(client, db_instance_id, waiter_name)
            break
        except WaiterError as e:
            # Instance may be renamed and AWSRetry doesn't handle WaiterError
            if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
                sleep(10)
                continue
            module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
                db_instance_id, expected_status)
            )
+
+
def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
    """Block until a DB cluster reaches the state implied by waiter_name."""
    if waiter_name == 'cluster_deleted':
        failure_msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
    else:
        failure_msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
    try:
        get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
    except WaiterError as e:
        module.fail_json_aws(e, msg=failure_msg)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
+
+
def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name):
    """Block until a DB instance snapshot reaches the state implied by waiter_name."""
    if waiter_name == 'db_snapshot_deleted':
        failure_msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id)
    else:
        failure_msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id)
    try:
        client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id)
    except WaiterError as e:
        module.fail_json_aws(e, msg=failure_msg)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id))
+
+
def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name):
    """Block until a DB cluster snapshot reaches the state implied by waiter_name."""
    if waiter_name == 'db_cluster_snapshot_deleted':
        failure_msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id)
    else:
        failure_msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id)
    try:
        client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id)
    except WaiterError as e:
        module.fail_json_aws(e, msg=failure_msg)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id))
+
+
def wait_for_status(client, module, identifier, method_name):
    """Dispatch to the waiter matching the resource type of method_name."""
    attributes = get_rds_method_attribute(method_name, module)
    dispatch = {
        'cluster': wait_for_cluster_status,
        'instance': wait_for_instance_status,
        'instance_snapshot': wait_for_instance_snapshot_status,
        'cluster_snapshot': wait_for_cluster_snapshot_status,
    }
    waiter_func = dispatch.get(attributes.resource)
    # Unknown resource types are silently ignored, matching the original chain.
    if waiter_func is not None:
        waiter_func(client, module, identifier, attributes.waiter)
+
+
def get_tags(client, module, resource_arn):
    """Return the RDS resource's tags as a flat Ansible dict; fail the module on API error."""
    try:
        tag_list = client.list_tags_for_resource(ResourceName=resource_arn)['TagList']
        return boto3_tag_list_to_ansible_dict(tag_list)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to describe tags")
+
+
def arg_spec_to_rds_params(options_dict):
    """Convert snake_case Ansible options into RDS-style CamelCase parameters.

    'tags' and 'processor_features' are passed through unconverted, and the
    acronyms Db/Iam/Az are upper-cased to match the RDS API spelling.
    """
    tags = options_dict.pop('tags')
    has_processor_features = 'processor_features' in options_dict
    if has_processor_features:
        processor_features = options_dict.pop('processor_features')
    camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
    for key in list(camel_options.keys()):
        # Apply every acronym substitution before re-keying once.  The previous
        # code popped the original key inside the inner loop, so a key containing
        # two acronyms (e.g. both 'Db' and 'Az') raised KeyError on the second pop.
        new_key = key
        for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
            new_key = new_key.replace(old, new)
        if new_key != key:
            camel_options[new_key] = camel_options.pop(key)
    camel_options['Tags'] = tags
    if has_processor_features:
        camel_options['ProcessorFeatures'] = processor_features
    return camel_options
+
+
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
    """Reconcile an RDS resource's tags with the requested set.

    Returns True when any tag was added or removed; False when tags is None
    or nothing differed.
    """
    if tags is None:
        return False
    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
    if tags_to_add:
        add_params = {'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
        call_method(client, module, method_name='add_tags_to_resource', parameters=add_params)
    if tags_to_remove:
        remove_params = {'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
        call_method(client, module, method_name='remove_tags_from_resource', parameters=remove_params)
    return bool(tags_to_add or tags_to_remove)
+
+
def compare_iam_roles(existing_roles, target_roles, purge_roles):
    '''
    Return the differences between target and existing IAM roles.

    Parameters:
        existing_roles (list): Existing IAM roles (their 'status' keys are ignored)
        target_roles (list): Target IAM roles
        purge_roles (bool): Remove roles not in target_roles if True

    Returns:
        roles_to_add (list): IAM roles present in target but not existing
        roles_to_remove (list): IAM roles present in existing but not target (empty unless purge_roles)
    '''
    # Drop the transient 'status' key so role dicts compare on identity only.
    stripped_existing = [{k: v for k, v in role.items() if k != 'status'} for role in existing_roles]
    roles_to_add = [role for role in target_roles if role not in stripped_existing]
    roles_to_remove = []
    if purge_roles:
        roles_to_remove = [role for role in stripped_existing if role not in target_roles]
    return roles_to_add, roles_to_remove
+
+
def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove):
    '''
    Update a DB instance's associated IAM roles

    Parameters:
        client: RDS client
        module: AnsibleAWSModule
        instance_id (str): DB's instance ID
        roles_to_add (list): List of IAM roles to add
        roles_to_remove (list): List of IAM roles to delete

    Returns:
        changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not
    '''
    # Bug fix: 'changed' was previously unbound, so calling this with two empty
    # lists raised NameError on the return statement.
    changed = False
    for role in roles_to_remove:
        params = {'DBInstanceIdentifier': instance_id,
                  'RoleArn': role['role_arn'],
                  'FeatureName': role['feature_name']}
        _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params)
    for role in roles_to_add:
        params = {'DBInstanceIdentifier': instance_id,
                  'RoleArn': role['role_arn'],
                  'FeatureName': role['feature_name']}
        _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params)
    return changed
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
new file mode 100644
index 000000000..1bd214b6b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
@@ -0,0 +1,78 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from .cloud import CloudRetry
+
+
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    # type(None) is a harmless never-raised stand-in when botocore is absent.
    return ClientError if HAS_BOTO3 else type(None)
+
+
class AWSRetry(CloudRetry):
    """CloudRetry specialisation that understands botocore ClientError codes."""

    # The exception type whose instances carry an AWS error code.
    base_class = _botocore_exception_maybe()

    @staticmethod
    def status_code_from_exception(error):
        """Extract the AWS error code string from a ClientError."""
        return error.response['Error']['Code']

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """Return True when response_code is a throttling/transient code worth retrying."""
        # This list of failures is based on this API Reference
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        #
        # TooManyRequestsException comes from inside botocore when it
        # does retrys, unfortunately however it does not try long
        # enough to allow some services such as API Gateway to
        # complete configuration. At the moment of writing there is a
        # botocore/boto3 bug open to fix this.
        #
        # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
        retry_on = {
            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
            'InternalFailure', 'InternalError', 'TooManyRequestsException',
            'Throttling',
        }
        if catch_extra_error_codes:
            retry_on = retry_on.union(catch_extra_error_codes)
        return response_code in retry_on
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
new file mode 100644
index 000000000..3e2940a53
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
@@ -0,0 +1,64 @@
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags):
    """Bring a Route53 resource's tags in line with new_tags.

    Returns True when a change was made (or would be made, in check mode),
    False when new_tags is None or nothing differed.
    """
    if new_tags is None:
        return False

    current_tags = get_tags(module, client, resource_type, resource_id)
    tags_to_set, tags_to_delete = compare_aws_tags(current_tags, new_tags, purge_tags=purge_tags)

    update_args = {}
    if tags_to_set:
        update_args['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set)
    if tags_to_delete:
        update_args['RemoveTagKeys'] = tags_to_delete

    if not update_args:
        return False
    if module.check_mode:
        return True

    try:
        client.change_tags_for_resource(
            ResourceType=resource_type,
            ResourceId=resource_id,
            **update_args
        )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type),
                             resource_id=resource_id, change_params=update_args)
    return True
+
+
def get_tags(module, client, resource_type, resource_id):
    """Fetch a Route53 resource's tags as a flat dict.

    Missing resources (NoSuchHealthCheck / NoSuchHostedZone) yield an empty
    dict; any other API error fails the module.
    """
    try:
        response = client.list_tags_for_resource(
            ResourceType=resource_type,
            ResourceId=resource_id,
        )
    except is_boto3_error_code('NoSuchHealthCheck'):
        return {}
    except is_boto3_error_code('NoSuchHostedZone'):  # pylint: disable=duplicate-except
        return {}
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type),
                             resource_id=resource_id)

    return boto3_tag_list_to_ansible_dict(response['ResourceTagSet']['Tags'])
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
new file mode 100644
index 000000000..c13c91f25
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+import string
+
+
def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
    """Compute the expected S3 ETag for a local file.

    For a multipart ETag ('<hash>-<parts>') the per-part sizes are read with
    head_object and the multipart checksum is reconstructed; otherwise the
    plain MD5 of the file is returned.  Returns None when MD5 is unavailable.
    """
    if not HAS_MD5:
        return None

    if '-' not in etag:
        # Single-part upload: the ETag is just the MD5 of the content.
        return '"{0}"'.format(module.md5(filename))

    # Multipart ETag: MD5 of the concatenated per-part MD5 digests, plus part count.
    num_parts = int(etag[1:-1].split('-')[1])
    head_kwargs = dict(
        Bucket=bucket,
        Key=obj,
    )
    if version:
        head_kwargs['VersionId'] = version

    part_digests = []
    with open(filename, 'rb') as f:
        for part_num in range(1, num_parts + 1):
            head_kwargs['PartNumber'] = part_num
            try:
                head = s3.head_object(**head_kwargs)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to get head object")
            part_digests.append(md5(f.read(int(head['ContentLength']))))

    combined = md5(b''.join(m.digest() for m in part_digests))
    return '"{0}-{1}"'.format(combined.hexdigest(), len(part_digests))
+
+
def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None):
    """Compute the expected S3 ETag for in-memory bytes.

    Mirrors calculate_etag but walks the content buffer by per-part lengths
    instead of reading from a file.  Returns None when MD5 is unavailable.
    """
    if not HAS_MD5:
        return None

    if '-' not in etag:
        # Single-part upload: the ETag is just the MD5 of the content.
        return '"{0}"'.format(md5(content).hexdigest())

    # Multipart ETag: MD5 of the concatenated per-part MD5 digests, plus part count.
    num_parts = int(etag[1:-1].split('-')[1])
    head_kwargs = dict(
        Bucket=bucket,
        Key=obj,
    )
    if version:
        head_kwargs['VersionId'] = version

    part_digests = []
    offset = 0
    for part_num in range(1, num_parts + 1):
        head_kwargs['PartNumber'] = part_num
        try:
            head = s3.head_object(**head_kwargs)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get head object")
        length = int(head['ContentLength'])
        part_digests.append(md5(content[offset:offset + length]))
        offset += length

    combined = md5(b''.join(m.digest() for m in part_digests))
    return '"{0}-{1}"'.format(combined.hexdigest(), len(part_digests))
+
+
def validate_bucket_name(module, name):
    """Validate an S3 bucket name against AWS naming rules; fail the module on violation.

    See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
    Returns True when the name is acceptable.
    """
    if len(name) < 3:
        module.fail_json(msg='the length of an S3 bucket must be at least 3 characters')
    if len(name) > 63:
        module.fail_json(msg='the length of an S3 bucket cannot exceed 63 characters')

    legal_characters = string.ascii_lowercase + ".-" + string.digits
    illegal_characters = [c for c in name if c not in legal_characters]
    if illegal_characters:
        module.fail_json(msg='invalid character(s) found in the bucket name')
    # Bug fix: the error message (and the AWS rule) covers both ends of the
    # name, but previously only the final character was checked.
    alnum = string.ascii_lowercase + string.digits
    if name[0] not in alnum or name[-1] not in alnum:
        module.fail_json(msg='bucket names must begin and end with a letter or number')
    return True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
new file mode 100644
index 000000000..1568e4887
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
@@ -0,0 +1,181 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
    """Convert a boto3 list of resource tags to a flat dict of key:value pairs.

    Args:
        tags_list (list): List of dicts representing AWS tags.
        tag_name_key_name (str): Key under which each tag's name is stored (boto3 doesn't always use "Key").
        tag_value_key_name (str): Key under which each tag's value is stored (boto3 doesn't always use "Value").
    Basic Usage:
        >>> boto3_tag_list_to_ansible_dict([{'Key': 'MyTagKey', 'Value': 'MyTagValue'}])
        {'MyTagKey': 'MyTagValue'}
    Returns:
        Dict of key:value pairs representing AWS tags.
    Raises:
        ValueError: when no candidate key/value field names match the list entries.
    """
    if tag_name_key_name and tag_value_key_name:
        tag_candidates = {tag_name_key_name: tag_value_key_name}
    else:
        tag_candidates = {'key': 'value', 'Key': 'Value'}

    # minio seems to return [{}] as an empty tags_list
    if not tags_list or not any(tags_list):
        return {}

    first_tag = tags_list[0]
    for name_key, value_key in tag_candidates.items():
        if name_key in first_tag and value_key in first_tag:
            return {tag[name_key]: tag[value_key] for tag in tags_list}
    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
    """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts.

    Args:
        tags_dict (dict): Dict representing AWS resource tags.
        tag_name_key_name (str): Key under which to store each tag's name (boto3 doesn't always use "Key").
        tag_value_key_name (str): Key under which to store each tag's value (boto3 doesn't always use "Value").
    Basic Usage:
        >>> ansible_dict_to_boto3_tag_list({'MyTagKey': 'MyTagValue'})
        [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
    Returns:
        List of dicts containing tag keys and values; empty list for a falsy input.
    """
    if not tags_dict:
        return []

    # Tag values are coerced to native strings to match what the AWS APIs expect.
    return [
        {tag_name_key_name: key, tag_value_key_name: to_native(value)}
        for key, value in tags_dict.items()
    ]
+
+
def boto3_tag_specifications(tags_dict, types=None):
    """Build an EC2 TagSpecification list from a flat tag dict and resource types.

    https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html

    Args:
        tags_dict (dict): Dict representing AWS resource tags.
        types (str | list): Resource type(s) to tag; a bare string is accepted.
    Basic Usage:
        >>> boto3_tag_specifications({'MyTagKey': 'MyTagValue'}, ['instance'])
        [{'ResourceType': 'instance', 'Tags': [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]}]
    Returns:
        List of TagSpecification dicts, or None for a falsy tags_dict.
    """
    if not tags_dict:
        return None

    tag_list = ansible_dict_to_boto3_tag_list(tags_dict)
    if not types:
        # No resource type requested: emit a single untyped specification.
        return [dict(Tags=tag_list)]

    if isinstance(types, string_types):
        types = [types]
    return [dict(ResourceType=type_name, Tags=tag_list) for type_name in types]
+
+
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
    """Compare two dicts of AWS tags (as produced by boto3_tag_list_to_ansible_dict).

    :param current_tags_dict: tags currently on the resource
    :param new_tags_dict: tags the user wants
    :param purge_tags: when True, tags absent from new_tags_dict are scheduled for removal
    :return: tag_key_value_pairs_to_set: dict of key/value pairs that need setting (empty if identical)
    :return: tag_keys_to_unset: list of key names (str) that need unsetting (empty if none)
    """
    to_set = {}
    to_unset = []

    if purge_tags:
        for key in current_tags_dict:
            # Amazon have reserved 'aws:*' tags, we should avoid purging them as
            # this probably isn't what people want to do...
            if key in new_tags_dict or key.startswith('aws:'):
                continue
            to_unset.append(key)

    for key in set(new_tags_dict) - set(to_unset):
        if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
            to_set[key] = new_tags_dict[key]

    return to_set, to_unset
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
new file mode 100644
index 000000000..dd7d9738a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
@@ -0,0 +1,83 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import string
+import textwrap
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+
+
def _windows_callback_script(passwd=None):
    """Return a PowerShell userdata script that configures WinRM via the public
    ConfigureRemotingForAnsible.ps1 helper, optionally resetting the
    administrator password first."""
    script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
    if passwd is None:
        script_tpl = """\
        <powershell>
        $admin = [adsi]('WinNT://./administrator, user')
        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
        </powershell>
        """
    else:
        # PowerShell single-quoted strings escape quotes by doubling them.
        passwd = passwd.replace("'", "''")
        script_tpl = """\
        <powershell>
        $admin = [adsi]('WinNT://./administrator, user')
        $admin.PSBase.Invoke('SetPassword', '${PASS}')
        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
        </powershell>
        """

    return string.Template(textwrap.dedent(script_tpl)).safe_substitute(PASS=passwd, SCRIPT=script_url)
+
+
def _linux_callback_script(tower_address, template_id, host_config_key):
    """Return a bash userdata script that phones home to an Ansible Tower
    job-template callback URL, retrying up to 10 times and falling back to
    the v1 API for old Tower releases."""
    # URL-quote the pieces that end up in the callback URL, and shell-escape
    # the key so it survives the single-quoted curl argument.
    template_id = urlparse.quote(template_id)
    tower_address = urlparse.quote(tower_address)
    host_config_key = host_config_key.replace("'", "'\"'\"'")

    script_tpl = """\
    #!/bin/bash
    set -x

    retry_attempts=10
    attempt=0
    while [[ $attempt -lt $retry_attempts ]]
    do
      status_code=$(curl --max-time 10 -v -k -s -i \
        --data 'host_config_key=${host_config_key}' \
        'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
        | head -n 1 \
        | awk '{print $2}')
      if [[ $status_code == 404 ]]
      then
        status_code=$(curl --max-time 10 -v -k -s -i \
          --data 'host_config_key=${host_config_key}' \
          'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
          | head -n 1 \
          | awk '{print $2}')
        # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
      fi
      if [[ $status_code == 201 ]]
      then
        exit 0
      fi
      attempt=$(( attempt + 1 ))
      echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
      sleep 60
    done
    exit 1
    """
    rendered = string.Template(textwrap.dedent(script_tpl))
    return rendered.safe_substitute(
        tower_address=tower_address,
        template_id=template_id,
        host_config_key=host_config_key,
    )
+
+
def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd):
    """Return the userdata callback script appropriate for the target OS."""
    if not windows:
        return _linux_callback_script(tower_address, job_template_id, host_config_key)
    return to_native(_windows_callback_script(passwd=passwd))
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
new file mode 100644
index 000000000..70d38cd8a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
@@ -0,0 +1,140 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
def ansible_dict_to_boto3_filter_list(filters_dict):
    """Convert an Ansible dict of filters to the list-of-dicts form boto3 expects.

    Args:
        filters_dict (dict): Dict of AWS filters.
    Basic Usage:
        >>> ansible_dict_to_boto3_filter_list({'some-aws-id': 'i-01234567'})
        [{'Name': 'some-aws-id', 'Values': ['i-01234567']}]
    Returns:
        List of AWS filter dicts; scalar values are wrapped in single-element
        lists (booleans lower-cased, integers stringified).
    """
    filters_list = []
    for name, value in filters_dict.items():
        if isinstance(value, bool):
            values = [str(value).lower()]
        elif isinstance(value, integer_types):
            values = [str(value)]
        elif isinstance(value, string_types):
            values = [value]
        else:
            # Assume it is already an iterable of values.
            values = value
        filters_list.append({'Name': name, 'Values': values})

    return filters_list
+
+
def map_complex_type(complex_type, type_map):
    """
    Allows to cast elements within a dictionary to a specific type
    Example of usage:

        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }

        deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                    DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    This ensures all keys within the root element are casted and valid integers
    """

    if complex_type is None:
        return
    new_type = type(complex_type)()
    if isinstance(complex_type, dict):
        for key in complex_type:
            if key in type_map:
                if isinstance(type_map[key], list):
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key][0])
                else:
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key])
            else:
                # Keys without a mapping are copied through unchanged.
                new_type[key] = complex_type[key]
    elif isinstance(complex_type, list):
        for item in complex_type:
            new_type.append(map_complex_type(item, type_map))
    elif type_map:
        # Bug fix: the previous globals()['__builtins__'][type_map] lookup
        # assumed '__builtins__' is a dict, but it is the builtins *module*
        # when this code runs in __main__, which made the subscript raise.
        if isinstance(__builtins__, dict):
            cast = __builtins__[type_map]
        else:
            cast = getattr(__builtins__, type_map)
        return cast(complex_type)
    return new_type
+
+
def scrub_none_parameters(parameters, descend_into_lists=True):
    """
    Iterate over a dictionary removing any keys that have a None value

    Reference: https://github.com/ansible-collections/community.aws/issues/251
    Credit: https://medium.com/better-programming/how-to-remove-null-none-values-from-a-dictionary-in-python-1bedf1aab5e4

    :param descend_into_lists: whether or not to descend in to lists to continue to remove None values
    :param parameters: parameter dict
    :return: parameter dict with all keys = None removed
    """
    cleaned = {}
    for key, value in parameters.items():
        if value is None:
            # Top-level None values are dropped entirely.
            continue
        if isinstance(value, dict):
            cleaned[key] = scrub_none_parameters(value, descend_into_lists=descend_into_lists)
        elif descend_into_lists and isinstance(value, list):
            # Only dict elements are scrubbed; bare None list items are kept.
            cleaned[key] = [
                scrub_none_parameters(item, descend_into_lists=descend_into_lists) if isinstance(item, dict) else item
                for item in value
            ]
        else:
            cleaned[key] = value
    return cleaned
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
new file mode 100644
index 000000000..8011a1be9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
@@ -0,0 +1,238 @@
+# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import hashlib
+import hmac
+import operator
+
+try:
+ from boto3 import session
+except ImportError:
+ pass
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+from .ec2 import HAS_BOTO3
+from .ec2 import get_aws_connection_info
+
+import ansible.module_utils.common.warnings as ansible_warnings
+
+
def hexdigest(s):
    """
    Return the SHA-256 hex digest of *s* after encoding it as UTF-8.
    """

    ansible_warnings.deprecate(
        'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.',
        version='7.0.0', collection_name='amazon.aws')

    digest = hashlib.sha256()
    digest.update(s.encode("utf-8"))
    return digest.hexdigest()
+
+
def format_querystring(params=None):
    """
    Return a properly url-encoded query string built from the *params* dict.

    Parameters are sorted by name, as required for a canonical SigV4 request.
    Returns "" when *params* is empty or None.
    """

    ansible_warnings.deprecate(
        'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.',
        version='7.0.0', collection_name='amazon.aws')

    if not params:
        return ""

    # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
    # ``key=`` is required here: ``sorted`` only accepts the key function as a keyword
    # argument in Python 3, and the previous positional call raised TypeError.
    return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
+
+
+# Key derivation functions. See:
+# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
    '''
    Return the HMAC-SHA256 digest of *msg* (UTF-8 encoded) using *key*.
    '''

    ansible_warnings.deprecate(
        'amazon.aws.module_utils.urls.sign is unused and has been deprecated.',
        version='7.0.0', collection_name='amazon.aws')

    mac = hmac.new(key, digestmod=hashlib.sha256)
    mac.update(msg.encode("utf-8"))
    return mac.digest()
+
+
def get_signature_key(key, dateStamp, regionName, serviceName):
    '''
    Derive the AWS SigV4 signing key for the given date/region/service scope.

    Follows the standard key-derivation chain:
    HMAC("AWS4" + secret, date) -> region -> service -> "aws4_request".
    '''

    ansible_warnings.deprecate(
        'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.',
        version='7.0.0', collection_name='amazon.aws')

    signing_key = sign(("AWS4" + key).encode("utf-8"), dateStamp)
    for scope_part in (regionName, serviceName, "aws4_request"):
        signing_key = sign(signing_key, scope_part)
    return signing_key
+
+
def get_aws_credentials_object(module):
    '''
    Return the botocore credentials object (access key / secret key / token)
    for *module*'s configured AWS connection.  Fails the module if boto3 is
    not installed.
    '''

    ansible_warnings.deprecate(
        'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.',
        version='7.0.0', collection_name='amazon.aws')

    if not HAS_BOTO3:
        module.fail_json("get_aws_credentials_object requires boto3")

    dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
    return session.Session(**boto_params).get_credentials()
+
+
+# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
+def signed_request(
+ module=None,
+ method="GET", service=None, host=None, uri=None,
+ query=None, body="", headers=None,
+ session_in_header=True, session_in_query=False
+):
+ """Generate a SigV4 request to an AWS resource for a module
+
+ This is used if you wish to authenticate with AWS credentials to a secure endpoint like an elastisearch domain.
+
+ Returns :class:`HTTPResponse` object.
+
+ Example:
+ result = signed_request(
+ module=this,
+ service="es",
+ host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
+ )
+
+ :kwarg host: endpoint to talk to
+ :kwarg service: AWS id of service (like `ec2` or `es`)
+ :kwarg module: An AnsibleAWSModule to gather connection info from
+
+ :kwarg body: (optional) Payload to send
+ :kwarg method: (optional) HTTP verb to use
+ :kwarg query: (optional) dict of query params to handle
+ :kwarg uri: (optional) Resource path without query parameters
+
+ :kwarg session_in_header: (optional) Add the session token to the headers
+ :kwarg session_in_query: (optional) Add the session token to the query parameters
+
+ :returns: HTTPResponse
+ """
+
+ module.deprecate(
+ 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.',
+ version='7.0.0', collection_name='amazon.aws')
+
+ if not HAS_BOTO3:
+ module.fail_json("A sigv4 signed_request requires boto3")
+
+ # "Constants"
+
+ t = datetime.datetime.utcnow()
+ amz_date = t.strftime("%Y%m%dT%H%M%SZ")
+ datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
+ algorithm = "AWS4-HMAC-SHA256"
+
+ # AWS stuff
+
+ region, dummy, dummy = get_aws_connection_info(module, boto3=True)
+ credentials = get_aws_credentials_object(module)
+ access_key = credentials.access_key
+ secret_key = credentials.secret_key
+ session_token = credentials.token
+
+ if not access_key:
+ module.fail_json(msg="aws_access_key_id is missing")
+ if not secret_key:
+ module.fail_json(msg="aws_secret_access_key is missing")
+
+ credential_scope = "/".join([datestamp, region, service, "aws4_request"])
+
+ # Argument Defaults
+
+ uri = uri or "/"
+ query_string = format_querystring(query) if query else ""
+
+ headers = headers or dict()
+ query = query or dict()
+
+ headers.update({
+ "host": host,
+ "x-amz-date": amz_date,
+ })
+
+ # Handle adding of session_token if present
+ if session_token:
+ if session_in_header:
+ headers["X-Amz-Security-Token"] = session_token
+ if session_in_query:
+ query["X-Amz-Security-Token"] = session_token
+
+ if method == "GET":
+ body = ""
+
+ # Derived data
+
+ body_hash = hexdigest(body)
+ signed_headers = ";".join(sorted(headers.keys()))
+
+ # Setup Cannonical request to generate auth token
+
+ cannonical_headers = "\n".join([
+ key.lower().strip() + ":" + value for key, value in headers.items()
+ ]) + "\n" # Note additional trailing newline
+
+ cannonical_request = "\n".join([
+ method,
+ uri,
+ query_string,
+ cannonical_headers,
+ signed_headers,
+ body_hash,
+ ])
+
+ string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)])
+
+ # Sign the Cannonical request
+
+ signing_key = get_signature_key(secret_key, datestamp, region, service)
+ signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
+
+ # Make auth header with that info
+
+ authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
+ algorithm, access_key, credential_scope, signed_headers, signature
+ )
+
+ # PERFORM THE REQUEST!
+
+ url = "https://" + host + uri
+
+ if query_string != "":
+ url = url + "?" + query_string
+
+ final_headers = {
+ "x-amz-date": amz_date,
+ "Authorization": authorization_header,
+ }
+
+ final_headers.update(headers)
+
+ return open_url(url, method=method, data=body, headers=final_headers)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py
new file mode 100644
index 000000000..8f4ca3638
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
+# remove the _version.py file, and replace the following import by
+#
+# from ansible.module_utils.compat.version import LooseVersion
+
+from ._version import LooseVersion # pylint: disable=unused-import
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
new file mode 100644
index 000000000..226dca920
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2017 Will Thames
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""
+This module adds shared support for Web Application Firewall modules
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .ec2 import AWSRetry
+from .waiters import get_waiter
+
+
# Lookup table mapping the short condition names used by the WAF modules to the
# identifiers the botocore WAF API uses for each condition kind:
#   'method'         - suffix of the client call (e.g. get_<method>, update_<method>)
#   'conditionset'   - key of the containing match set in API responses
#   'conditiontuple' - key of the individual tuples inside that set
#   'type'           - predicate 'Type' value used when referenced from a rule
MATCH_LOOKUP = {
    'byte': {
        'method': 'byte_match_set',
        'conditionset': 'ByteMatchSet',
        'conditiontuple': 'ByteMatchTuple',
        'type': 'ByteMatch'
    },
    'geo': {
        'method': 'geo_match_set',
        'conditionset': 'GeoMatchSet',
        'conditiontuple': 'GeoMatchConstraint',
        'type': 'GeoMatch'
    },
    'ip': {
        'method': 'ip_set',
        'conditionset': 'IPSet',
        'conditiontuple': 'IPSetDescriptor',
        'type': 'IPMatch'
    },
    'regex': {
        'method': 'regex_match_set',
        'conditionset': 'RegexMatchSet',
        'conditiontuple': 'RegexMatchTuple',
        'type': 'RegexMatch'
    },
    'size': {
        'method': 'size_constraint_set',
        'conditionset': 'SizeConstraintSet',
        'conditiontuple': 'SizeConstraint',
        'type': 'SizeConstraint'
    },
    'sql': {
        'method': 'sql_injection_match_set',
        'conditionset': 'SqlInjectionMatchSet',
        'conditiontuple': 'SqlInjectionMatchTuple',
        'type': 'SqlInjectionMatch',
    },
    'xss': {
        'method': 'xss_match_set',
        'conditionset': 'XssMatchSet',
        'conditiontuple': 'XssMatchTuple',
        'type': 'XssMatch'
    },
}
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_rule_with_backoff(client, rule_id):
    """Fetch a WAF rule by ID, retrying transient errors with jittered backoff."""
    response = client.get_rule(RuleId=rule_id)
    return response['Rule']
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_byte_match_set_with_backoff(client, byte_match_set_id):
    """Fetch a ByteMatchSet by ID, retrying transient errors with jittered backoff."""
    response = client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)
    return response['ByteMatchSet']
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_ip_set_with_backoff(client, ip_set_id):
    """Fetch an IPSet by ID, retrying transient errors with jittered backoff."""
    response = client.get_ip_set(IPSetId=ip_set_id)
    return response['IPSet']
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
    """Fetch a SizeConstraintSet by ID, retrying transient errors with jittered backoff."""
    response = client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)
    return response['SizeConstraintSet']
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
    """Fetch a SqlInjectionMatchSet by ID, retrying transient errors with jittered backoff."""
    response = client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)
    return response['SqlInjectionMatchSet']
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_xss_match_set_with_backoff(client, xss_match_set_id):
    """Fetch an XssMatchSet by ID, retrying transient errors with jittered backoff."""
    response = client.get_xss_match_set(XssMatchSetId=xss_match_set_id)
    return response['XssMatchSet']
+
+
def get_rule(client, module, rule_id):
    """Return a WAF rule with each predicate expanded to its full match-set details.

    Fails the module on botocore errors instead of raising.
    """
    try:
        rule = get_rule_with_backoff(client, rule_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain waf rule")

    # Predicate 'Type' -> fetcher for the corresponding match set
    match_set_getters = {
        'ByteMatch': get_byte_match_set_with_backoff,
        'IPMatch': get_ip_set_with_backoff,
        'SizeConstraint': get_size_constraint_set_with_backoff,
        'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
        'XssMatch': get_xss_match_set_with_backoff,
    }
    for predicate in rule.get('Predicates', []):
        getter = match_set_getters.get(predicate['Type'])
        if getter is not None:
            predicate.update(getter(client, predicate['DataId']))
            # DataId is superseded by the Id merged in from the match set
            del predicate['DataId']
    return rule
+
+
@AWSRetry.jittered_backoff(delay=5)
def get_web_acl_with_backoff(client, web_acl_id):
    """Fetch a WebACL by ID, retrying transient errors with jittered backoff."""
    response = client.get_web_acl(WebACLId=web_acl_id)
    return response['WebACL']
+
+
def get_web_acl(client, module, web_acl_id):
    """Return the web ACL identified by *web_acl_id* as a snake_cased dict.

    Each rule referenced by the ACL is fetched via ``get_rule`` and merged into
    its entry under ``Rules``.  Fails the module on botocore errors.
    """
    try:
        web_acl = get_web_acl_with_backoff(client, web_acl_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain web acl")

    if web_acl:
        try:
            # Expand each rule reference in place with the full rule definition
            for rule in web_acl['Rules']:
                rule.update(get_rule(client, module, rule['RuleId']))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
    return camel_dict_to_snake_dict(web_acl)
+
+
@AWSRetry.jittered_backoff(delay=5)
def list_rules_with_backoff(client):
    """List all WAF (global) rules using the service paginator."""
    full_result = client.get_paginator('list_rules').paginate().build_full_result()
    return full_result['Rules']
+
+
@AWSRetry.jittered_backoff(delay=5)
def list_regional_rules_with_backoff(client):
    """List all WAF Regional rules, following NextMarker manually (no paginator available)."""
    rules = []
    response = client.list_rules()
    while True:
        rules.extend(response['Rules'])
        if 'NextMarker' not in response:
            break
        response = client.list_rules(NextMarker=response['NextMarker'])
    return rules
+
+
@AWSRetry.jittered_backoff(delay=5)
def list_web_acls_with_backoff(client):
    """List all WAF (global) web ACLs using the service paginator."""
    full_result = client.get_paginator('list_web_acls').paginate().build_full_result()
    return full_result['WebACLs']
+
+
@AWSRetry.jittered_backoff(delay=5)
def list_regional_web_acls_with_backoff(client):
    """List all WAF Regional web ACLs, following NextMarker manually (no paginator available)."""
    acls = []
    response = client.list_web_acls()
    while True:
        acls.extend(response['WebACLs'])
        if 'NextMarker' not in response:
            break
        response = client.list_web_acls(NextMarker=response['NextMarker'])
    return acls
+
+
def list_web_acls(client, module):
    """List web ACLs for either a WAF (global) or WAFRegional client.

    Dispatches on the client's class name; returns None for any other client
    type (matching the original behaviour).  Fails the module on botocore errors.
    """
    listers = {
        'WAF': list_web_acls_with_backoff,
        'WAFRegional': list_regional_web_acls_with_backoff,
    }
    lister = listers.get(client.__class__.__name__)
    try:
        if lister is not None:
            return lister(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain web acls")
+
+
def get_change_token(client, module):
    """Fetch a fresh WAF change token, failing the module on botocore errors."""
    try:
        return client.get_change_token()['ChangeToken']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain change token")
+
+
@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException'])
def run_func_with_change_token_backoff(client, module, params, func, wait=False):
    """Invoke *func* with a fresh ChangeToken merged into *params*.

    Stale-data errors trigger a retry (via the decorator), which fetches a new
    token.  When *wait* is true, blocks until the change token is INSYNC.

    NOTE(review): mutates the caller's *params* dict by inserting 'ChangeToken'.
    """
    params['ChangeToken'] = get_change_token(client, module)
    result = func(**params)
    if wait:
        waiter = get_waiter(client, 'change_token_in_sync')
        waiter.wait(ChangeToken=result['ChangeToken'])
    return result
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
new file mode 100644
index 000000000..2abf390cb
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
@@ -0,0 +1,1265 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "ImageAvailable": {
+ "operation": "DescribeImages",
+ "maxAttempts": 80,
+ "delay": 15,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "Images[].State",
+ "expected": "available"
+ },
+ {
+ "state": "failure",
+ "matcher": "pathAny",
+ "argument": "Images[].State",
+ "expected": "failed"
+ }
+ ]
+ },
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "InternetGatewayAttached": {
+ "operation": "DescribeInternetGateways",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "InternetGateways[].Attachments[].State"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceAttached": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "attached",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceAvailable": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Status"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "retry"
+ },
+ ]
+ },
+ "NetworkInterfaceDeleted": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(NetworkInterfaces[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(NetworkInterfaces[]) == `0`",
+ "state": "success"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "success"
+ },
+ ]
+ },
+ "NetworkInterfaceDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": True,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "NetworkInterfaceNoDeleteOnTerminate": {
+ "operation": "DescribeNetworkInterfaces",
+ "delay": 5,
+ "maxAttempts": 10,
+ "acceptors": [
+ {
+ "expected": False,
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ },
+ {
+ "expected": "InvalidNetworkInterfaceID.NotFound",
+ "matcher": "error",
+ "state": "failure"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SnapshotCompleted": {
+ "delay": 15,
+ "operation": "DescribeSnapshots",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "completed",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Snapshots[].State"
+ }
+ ]
+ },
+ "SubnetAvailable": {
+ "delay": 15,
+ "operation": "DescribeSubnets",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Subnets[].State"
+ }
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpcAvailable": {
+ "delay": 15,
+ "operation": "DescribeVpcs",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "available",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "Vpcs[].State"
+ }
+ ]
+ },
+ "VpcExists": {
+ "operation": "DescribeVpcs",
+ "delay": 1,
+ "maxAttempts": 5,
+ "acceptors": [
+ {
+ "matcher": "status",
+ "expected": 200,
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpcID.NotFound",
+ "state": "retry"
+ }
+ ]
+ },
+ "VpcEndpointExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpcEndpoints",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpcEndpoints[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpcEndpointId.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ "NatGatewayDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeNatGateways",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "expected": "deleted",
+ "argument": "NatGateways[].State"
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "NatGatewayNotFound"
+ }
+ ]
+ },
+ "NatGatewayAvailable": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeNatGateways",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "expected": "available",
+ "argument": "NatGateways[].State"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "NatGatewayNotFound"
+ }
+ ]
+ },
+ }
+}
+
+
# botocore "version 2" waiter configuration for the WAF service:
# polls GetChangeTokenStatus until the change token reports INSYNC,
# retrying on WAFInternalErrorException.
waf_data = {
    "version": 2,
    "waiters": {
        "ChangeTokenInSync": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "GetChangeTokenStatus",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "ChangeTokenStatus == 'INSYNC'",
                    "state": "success"
                },
                {
                    "matcher": "error",
                    "expected": "WAFInternalErrorException",
                    "state": "retry"
                }
            ]
        }
    }
}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "FargateProfileActive": {
+ "delay": 20,
+ "maxAttempts": 30,
+ "operation": "DescribeFargateProfile",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "fargateProfile.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "FargateProfileDeleted": {
+ "delay": 20,
+ "maxAttempts": 30,
+ "operation": "DescribeFargateProfile",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "fargateProfile.status == 'DELETING'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "NodegroupActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeNodegroup",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "nodegroup.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "NodegroupDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeNodegroup",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "nodegroup.status == 'DELETING'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+elb_data = {
+ "version": 2,
+ "waiters": {
+ "AnyInstanceInService": {
+ "acceptors": [
+ {
+ "argument": "InstanceStates[].State",
+ "expected": "InService",
+ "matcher": "pathAny",
+ "state": "success"
+ }
+ ],
+ "delay": 15,
+ "maxAttempts": 40,
+ "operation": "DescribeInstanceHealth"
+ },
+ "InstanceDeregistered": {
+ "delay": 15,
+ "operation": "DescribeInstanceHealth",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "expected": "OutOfService",
+ "matcher": "pathAll",
+ "state": "success",
+ "argument": "InstanceStates[].State"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInstance",
+ "state": "success"
+ }
+ ]
+ },
+ "InstanceInService": {
+ "acceptors": [
+ {
+ "argument": "InstanceStates[].State",
+ "expected": "InService",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInstance",
+ "state": "retry"
+ }
+ ],
+ "delay": 15,
+ "maxAttempts": 40,
+ "operation": "DescribeInstanceHealth"
+ },
+ "LoadBalancerCreated": {
+ "delay": 10,
+ "maxAttempts": 60,
+ "operation": "DescribeLoadBalancers",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(LoadBalancerDescriptions[]) > `0`",
+ "state": "success",
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "retry",
+ },
+ ],
+ },
+ "LoadBalancerDeleted": {
+ "delay": 10,
+ "maxAttempts": 60,
+ "operation": "DescribeLoadBalancers",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(LoadBalancerDescriptions[]) > `0`",
+ "state": "retry",
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "success",
+ },
+ ],
+ },
+ }
+}
+
+elbv2_data = {
+ "version": 2,
+ "waiters": {
+ "LoadBalancerAvailable": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "active"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "provisioning"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancerIpAddressTypeIpv4": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "ipv4"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "dualstack"
+ },
+ {
+ "state": "failure",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancerIpAddressTypeDualStack": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "dualstack"
+ },
+ {
+ "state": "retry",
+ "matcher": "pathAny",
+ "argument": "LoadBalancers[].IpAddressType",
+ "expected": "ipv4"
+ },
+ {
+ "state": "failure",
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound"
+ }
+ ]
+ },
+ "LoadBalancersDeleted": {
+ "delay": 15,
+ "operation": "DescribeLoadBalancers",
+ "maxAttempts": 40,
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "pathAll",
+ "argument": "LoadBalancers[].State.Code",
+ "expected": "active"
+ },
+ {
+ "matcher": "error",
+ "expected": "LoadBalancerNotFound",
+ "state": "success"
+ }
+ ]
+ },
+ }
+}
+
+
# Custom botocore waiter definitions for RDS, consumed by rds_model().
# The structure follows botocore's "waiter-2" JSON specification: each
# waiter repeatedly calls `operation` (up to maxAttempts, sleeping `delay`
# seconds between calls) until one of its `acceptors` matches.
rds_data = {
    "version": 2,
    "waiters": {
        # Succeeds once every instance reported is in the "stopped" state.
        "DBInstanceStopped": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeDBInstances",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBInstances[].DBInstanceStatus",
                    "expected": "stopped"
                },
            ]
        },
        # Succeeds once every cluster is "available"; keeps retrying while
        # the cluster does not exist yet (NotFound during creation).
        "DBClusterAvailable": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeDBClusters",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBClusters[].Status",
                    "expected": "available"
                },
                {
                    "state": "retry",
                    "matcher": "error",
                    "expected": "DBClusterNotFoundFault"
                }
            ]
        },
        # Deletion normally completes via the DBClusterNotFoundFault acceptor
        # once the cluster disappears.
        # NOTE(review): the "stopped" success acceptor looks copy-pasted from
        # DBInstanceStopped — a deleting cluster would not normally report
        # "stopped"; confirm whether it is intentional.
        "DBClusterDeleted": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeDBClusters",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBClusters[].Status",
                    "expected": "stopped"
                },
                {
                    "state": "success",
                    "matcher": "error",
                    "expected": "DBClusterNotFoundFault"
                }
            ]
        },
        # Promotion is complete when no StatusInfos entries remain; retries
        # while any replica still reports a "replicating" status.
        "ReadReplicaPromoted": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeDBInstances",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "path",
                    "argument": "length(DBInstances[].StatusInfos) == `0`",
                    "expected": True
                },
                {
                    "state": "retry",
                    "matcher": "pathAny",
                    "argument": "DBInstances[].StatusInfos[].Status",
                    "expected": "replicating"
                }
            ]
        },
        # Waits for all associated IAM roles to reach ACTIVE.
        "RoleAssociated": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeDBInstances",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBInstances[].AssociatedRoles[].Status",
                    "expected": "ACTIVE"
                },
                {
                    "state": "retry",
                    "matcher": "pathAny",
                    "argument": "DBInstances[].AssociatedRoles[].Status",
                    "expected": "PENDING"
                }
            ]
        },
        # Succeeds when the remaining roles are all ACTIVE or no associated
        # roles are left at all (the length == 0 acceptor).
        "RoleDisassociated": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeDBInstances",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "pathAll",
                    "argument": "DBInstances[].AssociatedRoles[].Status",
                    "expected": "ACTIVE"
                },
                {
                    "state": "retry",
                    "matcher": "pathAny",
                    "argument": "DBInstances[].AssociatedRoles[].Status",
                    "expected": "PENDING"
                },
                {
                    "state": "success",
                    "matcher": "path",
                    "argument": "length(DBInstances[].AssociatedRoles[]) == `0`",
                    "expected": True
                },
            ]
        }
    }
}
+
+
# Custom botocore waiter definitions for Route53, consumed by
# route53_model(). Polls GetChange until the change set reports INSYNC.
route53_data = {
    "version": 2,
    "waiters": {
        "ResourceRecordSetsChanged": {
            "delay": 30,
            "maxAttempts": 60,
            "operation": "GetChange",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": "INSYNC",
                    "argument": "ChangeInfo.Status",
                    "state": "success"
                }
            ]
        }
    }
}
+
+
+def _inject_limit_retries(model):
+
+ extra_retries = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling']
+
+ acceptors = []
+ for error in extra_retries:
+ acceptors.append({"state": "success", "matcher": "error", "expected": error})
+
+ _model = copy.deepcopy(model)
+
+ for waiter in model["waiters"]:
+ _model["waiters"][waiter]["acceptors"].extend(acceptors)
+
+ return _model
+
+
def ec2_model(name):
    """Return the named EC2 waiter, with throttling retries injected."""
    config = _inject_limit_retries(ec2_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def waf_model(name):
    """Return the named WAF waiter, with throttling retries injected."""
    config = _inject_limit_retries(waf_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def eks_model(name):
    """Return the named EKS waiter, with throttling retries injected."""
    config = _inject_limit_retries(eks_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def elbv2_model(name):
    """Return the named ELBv2 waiter, with throttling retries injected."""
    config = _inject_limit_retries(elbv2_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def elb_model(name):
    """Return the named classic ELB waiter, with throttling retries injected."""
    config = _inject_limit_retries(elb_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def rds_model(name):
    """Return the named RDS waiter, with throttling retries injected."""
    config = _inject_limit_retries(rds_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
def route53_model(name):
    """Return the named Route53 waiter, with throttling retries injected."""
    config = _inject_limit_retries(route53_data)
    return core_waiter.WaiterModel(waiter_config=config).get_waiter(name)
+
+
# Registry of custom waiters keyed by (botocore client class name, waiter
# name).  Each value is a factory taking the client and returning a
# configured core_waiter.Waiter; looked up via get_waiter() below.
# The first argument to core_waiter.Waiter is the waiter's display name,
# used in its error messages — it should always match the key.
waiters_by_name = {
    ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter(
        'image_available',
        ec2_model('ImageAvailable'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_images
        )),
    ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
        'internet_gateway_exists',
        ec2_model('InternetGatewayExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_internet_gateways
        )),
    ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter(
        'internet_gateway_attached',
        ec2_model('InternetGatewayAttached'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_internet_gateways
        )),
    ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter(
        'network_interface_attached',
        ec2_model('NetworkInterfaceAttached'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_network_interfaces
        )),
    ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter(
        'network_interface_deleted',
        ec2_model('NetworkInterfaceDeleted'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_network_interfaces
        )),
    ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter(
        'network_interface_available',
        ec2_model('NetworkInterfaceAvailable'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_network_interfaces
        )),
    ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
        'network_interface_delete_on_terminate',
        ec2_model('NetworkInterfaceDeleteOnTerminate'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_network_interfaces
        )),
    ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
        'network_interface_no_delete_on_terminate',
        ec2_model('NetworkInterfaceNoDeleteOnTerminate'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_network_interfaces
        )),
    ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
        'route_table_exists',
        ec2_model('RouteTableExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_route_tables
        )),
    ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
        'security_group_exists',
        ec2_model('SecurityGroupExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_security_groups
        )),
    ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter(
        'snapshot_completed',
        ec2_model('SnapshotCompleted'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_snapshots
        )),
    ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter(
        'subnet_available',
        ec2_model('SubnetAvailable'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
        'subnet_exists',
        ec2_model('SubnetExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
        'subnet_has_map_public',
        ec2_model('SubnetHasMapPublic'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
        'subnet_no_map_public',
        ec2_model('SubnetNoMapPublic'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
        'subnet_has_assign_ipv6',
        ec2_model('SubnetHasAssignIpv6'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
        'subnet_no_assign_ipv6',
        ec2_model('SubnetNoAssignIpv6'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
        'subnet_deleted',
        ec2_model('SubnetDeleted'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_subnets
        )),
    ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter(
        'vpc_available',
        ec2_model('VpcAvailable'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpcs
        )),
    ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter(
        'vpc_exists',
        ec2_model('VpcExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpcs
        )),
    ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter(
        'vpc_endpoint_exists',
        ec2_model('VpcEndpointExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpc_endpoints
        )),
    ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
        'vpn_gateway_exists',
        ec2_model('VpnGatewayExists'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpn_gateways
        )),
    ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
        'vpn_gateway_detached',
        ec2_model('VpnGatewayDetached'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_vpn_gateways
        )),
    ('EC2', 'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter(
        'nat_gateway_deleted',
        ec2_model('NatGatewayDeleted'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_nat_gateways
        )),
    ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter(
        'nat_gateway_available',
        ec2_model('NatGatewayAvailable'),
        core_waiter.NormalizedOperationMethod(
            ec2.describe_nat_gateways
        )),
    ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
        'change_token_in_sync',
        waf_model('ChangeTokenInSync'),
        core_waiter.NormalizedOperationMethod(
            waf.get_change_token_status
        )),
    ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
        'change_token_in_sync',
        waf_model('ChangeTokenInSync'),
        core_waiter.NormalizedOperationMethod(
            waf.get_change_token_status
        )),
    ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
        'cluster_active',
        eks_model('ClusterActive'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_cluster
        )),
    ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
        'cluster_deleted',
        eks_model('ClusterDeleted'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_cluster
        )),
    ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter(
        'fargate_profile_active',
        eks_model('FargateProfileActive'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_fargate_profile
        )),
    ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter(
        'fargate_profile_deleted',
        eks_model('FargateProfileDeleted'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_fargate_profile
        )),
    ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter(
        'nodegroup_active',
        eks_model('NodegroupActive'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_nodegroup
        )),
    ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter(
        'nodegroup_deleted',
        eks_model('NodegroupDeleted'),
        core_waiter.NormalizedOperationMethod(
            eks.describe_nodegroup
        )),
    ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter(
        'any_instance_in_service',
        elb_model('AnyInstanceInService'),
        core_waiter.NormalizedOperationMethod(
            elb.describe_instance_health
        )),
    ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter(
        'instance_deregistered',
        elb_model('InstanceDeregistered'),
        core_waiter.NormalizedOperationMethod(
            elb.describe_instance_health
        )),
    ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter(
        # Fixed: was 'load_balancer_created' (copy-paste), which put the
        # wrong waiter name in timeout/error messages.
        'instance_in_service',
        elb_model('InstanceInService'),
        core_waiter.NormalizedOperationMethod(
            elb.describe_instance_health
        )),
    ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter(
        'load_balancer_created',
        elb_model('LoadBalancerCreated'),
        core_waiter.NormalizedOperationMethod(
            elb.describe_load_balancers
        )),
    ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter(
        'load_balancer_deleted',
        elb_model('LoadBalancerDeleted'),
        core_waiter.NormalizedOperationMethod(
            elb.describe_load_balancers
        )),
    ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter(
        'load_balancer_available',
        elbv2_model('LoadBalancerAvailable'),
        core_waiter.NormalizedOperationMethod(
            elbv2.describe_load_balancers
        )),
    ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter(
        'load_balancer_ip_address_type_ipv4',
        elbv2_model('LoadBalancerIpAddressTypeIpv4'),
        core_waiter.NormalizedOperationMethod(
            elbv2.describe_load_balancers
        )),
    ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter(
        # Fixed: was 'load_balancers_ip_address_type_dualstack' (stray "s"),
        # inconsistent with the registry key.
        'load_balancer_ip_address_type_dualstack',
        elbv2_model('LoadBalancerIpAddressTypeDualStack'),
        core_waiter.NormalizedOperationMethod(
            elbv2.describe_load_balancers
        )),
    ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter(
        'load_balancers_deleted',
        elbv2_model('LoadBalancersDeleted'),
        core_waiter.NormalizedOperationMethod(
            elbv2.describe_load_balancers
        )),
    ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
        'db_instance_stopped',
        rds_model('DBInstanceStopped'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_instances
        )),
    ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter(
        'cluster_available',
        rds_model('DBClusterAvailable'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_clusters
        )),
    ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter(
        'cluster_deleted',
        rds_model('DBClusterDeleted'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_clusters
        )),
    ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter(
        'read_replica_promoted',
        rds_model('ReadReplicaPromoted'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_instances
        )),
    ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter(
        'role_associated',
        rds_model('RoleAssociated'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_instances
        )),
    ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter(
        'role_disassociated',
        rds_model('RoleDisassociated'),
        core_waiter.NormalizedOperationMethod(
            rds.describe_db_instances
        )),
    ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter(
        'resource_record_sets_changed',
        route53_model('ResourceRecordSetsChanged'),
        core_waiter.NormalizedOperationMethod(
            route53.get_change
        )),
}
+
+
def get_waiter(client, waiter_name):
    """Look up a custom waiter for *client* by name.

    Unwraps retry-decorated clients, then dispatches on the client's class
    name via the waiters_by_name registry.  Raises NotImplementedError when
    no waiter is registered for that (client class, name) pair.
    """
    # Retry wrappers delegate to the underlying botocore client.
    if isinstance(client, _RetryingBotoClientWrapper):
        return get_waiter(client.client, waiter_name)
    try:
        return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
    except KeyError:
        known = ', '.join(repr(k) for k in waiters_by_name.keys())
        raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
            waiter_name, type(client), known))