summaryrefslogtreecommitdiffstats
path: root/ansible_collections/amazon/aws/plugins/module_utils
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-18 05:52:35 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-18 05:52:35 +0000
commit7fec0b69a082aaeec72fee0612766aa42f6b1b4d (patch)
treeefb569b86ca4da888717f5433e757145fa322e08 /ansible_collections/amazon/aws/plugins/module_utils
parentReleasing progress-linux version 7.7.0+dfsg-3~progress7.99u1. (diff)
downloadansible-7fec0b69a082aaeec72fee0612766aa42f6b1b4d.tar.xz
ansible-7fec0b69a082aaeec72fee0612766aa42f6b1b4d.zip
Merging upstream version 9.4.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/amazon/aws/plugins/module_utils')
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/_version.py344
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/acm.py345
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/arn.py68
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/backup.py162
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/batch.py9
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/botocore.py329
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/cloud.py48
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py307
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/common.py24
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/core.py35
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py49
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/ec2.py116
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py30
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/elbv2.py740
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/errors.py104
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/exceptions.py34
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/iam.py466
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/modules.py262
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/policy.py108
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/rds.py366
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/retries.py54
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/route53.py39
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/s3.py181
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/tagging.py53
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/tower.py13
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/transformation.py57
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/urls.py238
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/version.py14
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/waf.py148
-rw-r--r--ansible_collections/amazon/aws/plugins/module_utils/waiters.py1095
30 files changed, 3069 insertions, 2769 deletions
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py
deleted file mode 100644
index d91cf3ab4..000000000
--- a/ansible_collections/amazon/aws/plugins/module_utils/_version.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# Vendored copy of distutils/version.py from CPython 3.9.5
-#
-# Implements multiple version numbering conventions for the
-# Python Module Distribution Utilities.
-#
-# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
-#
-
-"""Provides classes to represent module version numbers (one class for
-each style of version numbering). There are currently two such classes
-implemented: StrictVersion and LooseVersion.
-
-Every version number class implements the following interface:
- * the 'parse' method takes a string and parses it to some internal
- representation; if the string is an invalid version number,
- 'parse' raises a ValueError exception
- * the class constructor takes an optional string argument which,
- if supplied, is passed to 'parse'
- * __str__ reconstructs the string that was passed to 'parse' (or
- an equivalent string -- ie. one that will generate an equivalent
- version number instance)
- * __repr__ generates Python code to recreate the version number instance
- * _cmp compares the current instance with either another instance
- of the same class or a string (which will be parsed to an instance
- of the same class, thus must follow the same rules)
-"""
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import re
-
-try:
- RE_FLAGS = re.VERBOSE | re.ASCII
-except AttributeError:
- RE_FLAGS = re.VERBOSE
-
-
-class Version:
- """Abstract base class for version numbering classes. Just provides
- constructor (__init__) and reproducer (__repr__), because those
- seem to be the same for all version numbering classes; and route
- rich comparisons to _cmp.
- """
-
- def __init__(self, vstring=None):
- if vstring:
- self.parse(vstring)
-
- def __repr__(self):
- return "%s ('%s')" % (self.__class__.__name__, str(self))
-
- def __eq__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c == 0
-
- def __lt__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c < 0
-
- def __le__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c <= 0
-
- def __gt__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c > 0
-
- def __ge__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c >= 0
-
-
-# Interface for version-number classes -- must be implemented
-# by the following classes (the concrete ones -- Version should
-# be treated as an abstract class).
-# __init__ (string) - create and take same action as 'parse'
-# (string parameter is optional)
-# parse (string) - convert a string representation to whatever
-# internal representation is appropriate for
-# this style of version numbering
-# __str__ (self) - convert back to a string; should be very similar
-# (if not identical to) the string supplied to parse
-# __repr__ (self) - generate Python code to recreate
-# the instance
-# _cmp (self, other) - compare two version numbers ('other' may
-# be an unparsed version string, or another
-# instance of your version class)
-
-
-class StrictVersion(Version):
- """Version numbering for anal retentives and software idealists.
- Implements the standard interface for version number classes as
- described above. A version number consists of two or three
- dot-separated numeric components, with an optional "pre-release" tag
- on the end. The pre-release tag consists of the letter 'a' or 'b'
- followed by a number. If the numeric components of two version
- numbers are equal, then one with a pre-release tag will always
- be deemed earlier (lesser) than one without.
-
- The following are valid version numbers (shown in the order that
- would be obtained by sorting according to the supplied cmp function):
-
- 0.4 0.4.0 (these two are equivalent)
- 0.4.1
- 0.5a1
- 0.5b3
- 0.5
- 0.9.6
- 1.0
- 1.0.4a3
- 1.0.4b1
- 1.0.4
-
- The following are examples of invalid version numbers:
-
- 1
- 2.7.2.2
- 1.3.a4
- 1.3pl1
- 1.3c4
-
- The rationale for this version numbering system will be explained
- in the distutils documentation.
- """
-
- version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$", RE_FLAGS)
-
- def parse(self, vstring):
- match = self.version_re.match(vstring)
- if not match:
- raise ValueError("invalid version number '%s'" % vstring)
-
- (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6)
-
- if patch:
- self.version = tuple(map(int, [major, minor, patch]))
- else:
- self.version = tuple(map(int, [major, minor])) + (0,)
-
- if prerelease:
- self.prerelease = (prerelease[0], int(prerelease_num))
- else:
- self.prerelease = None
-
- def __str__(self):
- if self.version[2] == 0:
- vstring = ".".join(map(str, self.version[0:2]))
- else:
- vstring = ".".join(map(str, self.version))
-
- if self.prerelease:
- vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
-
- return vstring
-
- def _cmp(self, other):
- if isinstance(other, str):
- other = StrictVersion(other)
- elif not isinstance(other, StrictVersion):
- return NotImplemented
-
- if self.version != other.version:
- # numeric versions don't match
- # prerelease stuff doesn't matter
- if self.version < other.version:
- return -1
- else:
- return 1
-
- # have to compare prerelease
- # case 1: neither has prerelease; they're equal
- # case 2: self has prerelease, other doesn't; other is greater
- # case 3: self doesn't have prerelease, other does: self is greater
- # case 4: both have prerelease: must compare them!
-
- if not self.prerelease and not other.prerelease:
- return 0
- elif self.prerelease and not other.prerelease:
- return -1
- elif not self.prerelease and other.prerelease:
- return 1
- elif self.prerelease and other.prerelease:
- if self.prerelease == other.prerelease:
- return 0
- elif self.prerelease < other.prerelease:
- return -1
- else:
- return 1
- else:
- raise AssertionError("never get here")
-
-
-# end class StrictVersion
-
-# The rules according to Greg Stein:
-# 1) a version number has 1 or more numbers separated by a period or by
-# sequences of letters. If only periods, then these are compared
-# left-to-right to determine an ordering.
-# 2) sequences of letters are part of the tuple for comparison and are
-# compared lexicographically
-# 3) recognize the numeric components may have leading zeroes
-#
-# The LooseVersion class below implements these rules: a version number
-# string is split up into a tuple of integer and string components, and
-# comparison is a simple tuple comparison. This means that version
-# numbers behave in a predictable and obvious way, but a way that might
-# not necessarily be how people *want* version numbers to behave. There
-# wouldn't be a problem if people could stick to purely numeric version
-# numbers: just split on period and compare the numbers as tuples.
-# However, people insist on putting letters into their version numbers;
-# the most common purpose seems to be:
-# - indicating a "pre-release" version
-# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
-# - indicating a post-release patch ('p', 'pl', 'patch')
-# but of course this can't cover all version number schemes, and there's
-# no way to know what a programmer means without asking him.
-#
-# The problem is what to do with letters (and other non-numeric
-# characters) in a version number. The current implementation does the
-# obvious and predictable thing: keep them as strings and compare
-# lexically within a tuple comparison. This has the desired effect if
-# an appended letter sequence implies something "post-release":
-# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
-#
-# However, if letters in a version number imply a pre-release version,
-# the "obvious" thing isn't correct. Eg. you would expect that
-# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
-# implemented here, this just isn't so.
-#
-# Two possible solutions come to mind. The first is to tie the
-# comparison algorithm to a particular set of semantic rules, as has
-# been done in the StrictVersion class above. This works great as long
-# as everyone can go along with bondage and discipline. Hopefully a
-# (large) subset of Python module programmers will agree that the
-# particular flavour of bondage and discipline provided by StrictVersion
-# provides enough benefit to be worth using, and will submit their
-# version numbering scheme to its domination. The free-thinking
-# anarchists in the lot will never give in, though, and something needs
-# to be done to accommodate them.
-#
-# Perhaps a "moderately strict" version class could be implemented that
-# lets almost anything slide (syntactically), and makes some heuristic
-# assumptions about non-digits in version number strings. This could
-# sink into special-case-hell, though; if I was as talented and
-# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
-# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
-# just as happy dealing with things like "2g6" and "1.13++". I don't
-# think I'm smart enough to do it right though.
-#
-# In any case, I've coded the test suite for this module (see
-# ../test/test_version.py) specifically to fail on things like comparing
-# "1.2a2" and "1.2". That's not because the *code* is doing anything
-# wrong, it's because the simple, obvious design doesn't match my
-# complicated, hairy expectations for real-world version numbers. It
-# would be a snap to fix the test suite to say, "Yep, LooseVersion does
-# the Right Thing" (ie. the code matches the conception). But I'd rather
-# have a conception that matches common notions about version numbers.
-
-
-class LooseVersion(Version):
- """Version numbering for anarchists and software realists.
- Implements the standard interface for version number classes as
- described above. A version number consists of a series of numbers,
- separated by either periods or strings of letters. When comparing
- version numbers, the numeric components will be compared
- numerically, and the alphabetic components lexically. The following
- are all valid version numbers, in no particular order:
-
- 1.5.1
- 1.5.2b2
- 161
- 3.10a
- 8.02
- 3.4j
- 1996.07.12
- 3.2.pl0
- 3.1.1.6
- 2g6
- 11g
- 0.960923
- 2.2beta29
- 1.13++
- 5.5.kw
- 2.0b1pl0
-
- In fact, there is no such thing as an invalid version number under
- this scheme; the rules for comparison are simple and predictable,
- but may not always give the results you want (for some definition
- of "want").
- """
-
- component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
-
- def __init__(self, vstring=None):
- if vstring:
- self.parse(vstring)
-
- def parse(self, vstring):
- # I've given up on thinking I can reconstruct the version string
- # from the parsed tuple -- so I just store the string here for
- # use by __str__
- self.vstring = vstring
- components = [x for x in self.component_re.split(vstring) if x and x != "."]
- for i, obj in enumerate(components):
- try:
- components[i] = int(obj)
- except ValueError:
- pass
-
- self.version = components
-
- def __str__(self):
- return self.vstring
-
- def __repr__(self):
- return "LooseVersion ('%s')" % str(self)
-
- def _cmp(self, other):
- if isinstance(other, str):
- other = LooseVersion(other)
- elif not isinstance(other, LooseVersion):
- return NotImplemented
-
- if self.version == other.version:
- return 0
- if self.version < other.version:
- return -1
- if self.version > other.version:
- return 1
-
-
-# end class LooseVersion
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
index 81c65507e..ab3a9f073 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/acm.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -1,21 +1,8 @@
# -*- coding: utf-8 -*-
-#
+
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
+
# Author:
# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
# on behalf of Telstra Corporation Limited
@@ -24,199 +11,239 @@
# - acm_certificate
# - acm_certificate_info
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
"""
Common Amazon Certificate Manager facts shared between modules
"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from .core import is_boto3_error_code
-from .ec2 import AWSRetry
-from .ec2 import ansible_dict_to_boto3_tag_list
-from .ec2 import boto3_tag_list_to_ansible_dict
+from .botocore import is_boto3_error_code
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
+
+
+def acm_catch_boto_exception(func):
+ def runner(*args, **kwargs):
+ module = kwargs.pop("module", None)
+ error = kwargs.pop("error", None)
+ ignore_error_codes = kwargs.pop("ignore_error_codes", [])
+
+ try:
+ return func(*args, **kwargs)
+ except is_boto3_error_code(ignore_error_codes):
+ return None
+ except (BotoCoreError, ClientError) as e:
+ if not module:
+ raise
+ module.fail_json_aws(e, msg=error)
+
+ return runner
-class ACMServiceManager(object):
+class ACMServiceManager:
"""Handles ACM Facts Services"""
def __init__(self, module):
self.module = module
- self.client = module.client('acm')
-
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
- def delete_certificate_with_backoff(self, client, arn):
- client.delete_certificate(CertificateArn=arn)
+ self.client = module.client("acm")
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"])
+ def delete_certificate_with_backoff(self, arn):
+ self.client.delete_certificate(CertificateArn=arn)
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"])
+ def list_certificates_with_backoff(self, statuses=None):
+ paginator = self.client.get_paginator("list_certificates")
+ # `list_certificates` requires explicit key type filter, or it returns only RSA_2048 certificates
+ kwargs = {
+ "Includes": {
+ "keyTypes": [
+ "RSA_1024",
+ "RSA_2048",
+ "RSA_3072",
+ "RSA_4096",
+ "EC_prime256v1",
+ "EC_secp384r1",
+ "EC_secp521r1",
+ ],
+ },
+ }
+ if statuses:
+ kwargs["CertificateStatuses"] = statuses
+ return paginator.paginate(**kwargs).build_full_result()["CertificateSummaryList"]
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(
+ delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"]
+ )
+ def get_certificate_with_backoff(self, certificate_arn):
+ response = self.client.get_certificate(CertificateArn=certificate_arn)
+ # strip out response metadata
+ return {"Certificate": response["Certificate"], "CertificateChain": response["CertificateChain"]}
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(
+ delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"]
+ )
+ def describe_certificate_with_backoff(self, certificate_arn):
+ return self.client.describe_certificate(CertificateArn=certificate_arn)["Certificate"]
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(
+ delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"]
+ )
+ def list_certificate_tags_with_backoff(self, certificate_arn):
+ return self.client.list_tags_for_certificate(CertificateArn=certificate_arn)["Tags"]
+
+ @acm_catch_boto_exception
+ @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"])
+ def import_certificate_with_backoff(self, certificate, private_key, certificate_chain, arn):
+ params = {"Certificate": to_bytes(certificate), "PrivateKey": to_bytes(private_key)}
+ if arn:
+ params["CertificateArn"] = arn
+ if certificate_chain:
+ params["CertificateChain"] = certificate_chain
- def delete_certificate(self, client, module, arn):
- module.debug("Attempting to delete certificate %s" % arn)
- try:
- self.delete_certificate_with_backoff(client, arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
- module.debug("Successfully deleted certificate %s" % arn)
+ return self.client.import_certificate(**params)["CertificateArn"]
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
- def list_certificates_with_backoff(self, client, statuses=None):
- paginator = client.get_paginator('list_certificates')
- kwargs = dict()
- if statuses:
- kwargs['CertificateStatuses'] = statuses
- return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
+ # Tags are a normal Ansible style dict
+ # {'Key':'Value'}
+ @AWSRetry.jittered_backoff(
+ delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"]
+ )
+ def tag_certificate_with_backoff(self, arn, tags):
+ aws_tags = ansible_dict_to_boto3_tag_list(tags)
+ self.client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
- def get_certificate_with_backoff(self, client, certificate_arn):
- response = client.get_certificate(CertificateArn=certificate_arn)
- # strip out response metadata
- return {'Certificate': response['Certificate'],
- 'CertificateChain': response['CertificateChain']}
-
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
- def describe_certificate_with_backoff(self, client, certificate_arn):
- return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
-
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
- def list_certificate_tags_with_backoff(self, client, certificate_arn):
- return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
-
- # Returns a list of certificates
- # if domain_name is specified, returns only certificates with that domain
- # if an ARN is specified, returns only that certificate
- # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
- # only certificates which contain all those tags (key exists, value matches).
- def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
+ def _match_tags(self, ref_tags, cert_tags):
+ if ref_tags is None:
+ return True
try:
- all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificates")
- if domain_name:
- certificates = [cert for cert in all_certificates
- if cert['DomainName'] == domain_name]
- else:
- certificates = all_certificates
+ return all(k in cert_tags for k in ref_tags) and all(cert_tags.get(k) == ref_tags[k] for k in ref_tags)
+ except (TypeError, AttributeError) as e:
+ self.module.fail_json_aws(e, msg="ACM tag filtering err")
- if arn:
- # still return a list, not just one item
- certificates = [c for c in certificates if c['CertificateArn'] == arn]
+ def delete_certificate(self, *args, arn=None):
+ # hacking for backward compatibility
+ if arn is None:
+ if len(args) < 3:
+ self.module.fail_json(msg="Missing required certificate arn to delete.")
+ arn = args[2]
+ error = f"Couldn't delete certificate {arn}"
+ self.delete_certificate_with_backoff(arn, module=self.module, error=error)
+
+ def get_certificates(self, *args, domain_name=None, statuses=None, arn=None, only_tags=None, **kwargs):
+ """
+ Returns a list of certificates
+ if domain_name is specified, returns only certificates with that domain
+ if an ARN is specified, returns only that certificate
+ only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
+ only certificates which contain all those tags (key exists, value matches).
+ """
+ all_certificates = self.list_certificates_with_backoff(
+ statuses=statuses, module=self.module, error="Couldn't obtain certificates"
+ )
+
+ def _filter_certificate(cert):
+ if domain_name and cert["DomainName"] != domain_name:
+ return False
+ if arn and cert["CertificateArn"] != arn:
+ return False
+ return True
+
+ certificates = list(filter(_filter_certificate, all_certificates))
results = []
for certificate in certificates:
- try:
- cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
- except is_boto3_error_code('ResourceNotFoundException'):
- # The certificate was deleted after the call to list_certificates_with_backoff.
+ cert_data = self.describe_certificate_with_backoff(
+ certificate["CertificateArn"],
+ module=self.module,
+ error=f"Couldn't obtain certificate metadata for domain {certificate['DomainName']}",
+ ignore_error_codes=["ResourceNotFoundException"],
+ )
+ if cert_data is None:
continue
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
# in some states, ACM resources do not have a corresponding cert
- if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
- try:
- cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
- except is_boto3_error_code('ResourceNotFoundException'):
- # The certificate was deleted after the call to list_certificates_with_backoff.
+ if cert_data["Status"] not in ("PENDING_VALIDATION", "VALIDATION_TIMED_OUT", "FAILED"):
+ cert_info = self.get_certificate_with_backoff(
+ certificate["CertificateArn"],
+ module=self.module,
+ error=f"Couldn't obtain certificate data for domain {certificate['DomainName']}",
+ ignore_error_codes=["ResourceNotFoundException"],
+ )
+ if cert_info is None:
continue
- except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
+ cert_data.update(cert_info)
+
cert_data = camel_dict_to_snake_dict(cert_data)
- try:
- tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
- except is_boto3_error_code('ResourceNotFoundException'):
- # The certificate was deleted after the call to list_certificates_with_backoff.
+ tags = self.list_certificate_tags_with_backoff(
+ certificate["CertificateArn"],
+ module=self.module,
+ error=f"Couldn't obtain tags for domain {certificate['DomainName']}",
+ ignore_error_codes=["ResourceNotFoundException"],
+ )
+ if tags is None:
continue
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
- cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ tags = boto3_tag_list_to_ansible_dict(tags)
+ if not self._match_tags(only_tags, tags):
+ continue
+ cert_data["tags"] = tags
results.append(cert_data)
-
- if only_tags:
- for tag_key in only_tags:
- try:
- results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
- except (TypeError, AttributeError) as e:
- for c in results:
- if 'tags' not in c:
- module.debug("cert is %s" % str(c))
- module.fail_json(msg="ACM tag filtering err", exception=e)
-
return results
- # returns the domain name of a certificate (encoded in the public cert)
- # for a given ARN
- # A cert with that ARN must already exist
- def get_domain_of_cert(self, client, module, arn):
+ def get_domain_of_cert(self, arn, **kwargs):
+ """
+ returns the domain name of a certificate (encoded in the public cert)
+ for a given ARN A cert with that ARN must already exist
+ """
if arn is None:
- module.fail(msg="Internal error with ACM domain fetching, no certificate ARN specified")
- try:
- cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
- return cert_data['DomainName']
-
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
- def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
- if certificate_chain:
- if arn:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateChain=to_bytes(certificate_chain),
- CertificateArn=arn)
- else:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateChain=to_bytes(certificate_chain))
- else:
- if arn:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateArn=arn)
- else:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key))
- return ret['CertificateArn']
-
- # Tags are a normal Ansible style dict
- # {'Key':'Value'}
- @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
- def tag_certificate_with_backoff(self, client, arn, tags):
- aws_tags = ansible_dict_to_boto3_tag_list(tags)
- client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
-
- def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
+ self.module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
+ error = f"Couldn't obtain certificate data for arn {arn}"
+ cert_data = self.describe_certificate_with_backoff(certificate_arn=arn, module=self.module, error=error)
+ return cert_data["DomainName"]
+ def import_certificate(self, *args, certificate, private_key, arn=None, certificate_chain=None, tags=None):
original_arn = arn
# upload cert
- try:
- arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't upload new certificate")
-
+ params = {
+ "certificate": certificate,
+ "private_key": private_key,
+ "certificate_chain": certificate_chain,
+ "arn": arn,
+ "module": self.module,
+ "error": "Couldn't upload new certificate",
+ }
+ arn = self.import_certificate_with_backoff(**params)
if original_arn and (arn != original_arn):
# I'm not sure whether the API guarentees that the ARN will not change
# I'm failing just in case.
# If I'm wrong, I'll catch it in the integration tests.
- module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
+ self.module.fail_json(msg=f"ARN changed with ACM update, from {original_arn} to {arn}")
# tag that cert
try:
- self.tag_certificate_with_backoff(client, arn, tags)
+ self.tag_certificate_with_backoff(arn, tags)
except (BotoCoreError, ClientError) as e:
- module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
try:
- self.delete_certificate_with_backoff(client, arn)
+ self.delete_certificate_with_backoff(arn)
except (BotoCoreError, ClientError):
- module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.")
- module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
- module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
+ self.module.warn(
+ f"Certificate {arn} exists, and is not tagged. So Ansible will not see it on the next run."
+ )
+ self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}, couldn't delete it either")
+ self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}")
return arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
index ac8dfc9e0..d62b4c4d8 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/arn.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
@@ -1,35 +1,51 @@
-#
+# -*- coding: utf-8 -*-
+
# Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import re
+def validate_aws_arn(
+ arn, partition=None, service=None, region=None, account_id=None, resource=None, resource_type=None, resource_id=None
+):
+ details = parse_aws_arn(arn)
+
+ if not details:
+ return False
+
+ if partition and details.get("partition") != partition:
+ return False
+ if service and details.get("service") != service:
+ return False
+ if region and details.get("region") != region:
+ return False
+ if account_id and details.get("account_id") != account_id:
+ return False
+ if resource and details.get("resource") != resource:
+ return False
+ if resource_type and details.get("resource_type") != resource_type:
+ return False
+ if resource_id and details.get("resource_id") != resource_id:
+ return False
+
+ return True
+
+
def parse_aws_arn(arn):
"""
+ Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html
+
The following are the general formats for ARNs.
arn:partition:service:region:account-id:resource-id
arn:partition:service:region:account-id:resource-type/resource-id
arn:partition:service:region:account-id:resource-type:resource-id
The specific formats depend on the resource.
The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID.
+
+ Note: resource_type handling is very naive, for complex cases it may be necessary to use
+ "resource" directly instead of resource_type, this will include the resource type and full ID,
+ including all paths.
"""
m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn)
if m is None:
@@ -41,6 +57,12 @@ def parse_aws_arn(arn):
result.update(dict(account_id=m.group(6)))
result.update(dict(resource=m.group(7)))
+ m2 = re.search(r"^(.*?)[:/](.+)$", m.group(7))
+ if m2 is None:
+ result.update(dict(resource_type=None, resource_id=m.group(7)))
+ else:
+ result.update(dict(resource_type=m2.group(1), resource_id=m2.group(2)))
+
return result
@@ -59,11 +81,11 @@ def is_outpost_arn(arn):
if not details:
return False
- service = details.get('service') or ""
- if service.lower() != 'outposts':
+ service = details.get("service") or ""
+ if service.lower() != "outposts":
return False
- resource = details.get('resource') or ""
- if not re.match('^outpost/op-[a-f0-9]{17}$', resource):
+ resource = details.get("resource") or ""
+ if not re.match("^outpost/op-[a-f0-9]{17}$", resource):
return False
return True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/backup.py b/ansible_collections/amazon/aws/plugins/module_utils/backup.py
new file mode 100644
index 000000000..907879a8a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/backup.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by HAS_BOTO3
+
+from typing import Union
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+def get_backup_resource_tags(module, backup_client, resource):
+ try:
+ response = backup_client.list_tags(ResourceArn=resource)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg=f"Failed to list tags on the resource {resource}")
+
+ return response["Tags"]
+
+
+def _list_backup_plans(client, backup_plan_name):
+ first_iteration = False
+ next_token = None
+
+    # We can not use the paginator at the moment because it was introduced after boto3 version 1.22
+ # paginator = client.get_paginator("list_backup_plans")
+ # result = paginator.paginate(**params).build_full_result()["BackupPlansList"]
+
+ response = client.list_backup_plans()
+ next_token = response.get("NextToken", None)
+
+ if next_token is None:
+ entries = response["BackupPlansList"]
+ for backup_plan in entries:
+ if backup_plan_name == backup_plan["BackupPlanName"]:
+ return backup_plan["BackupPlanId"]
+
+ while next_token is not None:
+ if first_iteration:
+ response = client.list_backup_plans(NextToken=next_token)
+ first_iteration = True
+ entries = response["BackupPlansList"]
+ for backup_plan in entries:
+ if backup_plan_name == backup_plan["BackupPlanName"]:
+ return backup_plan["BackupPlanId"]
+ next_token = response.get("NextToken")
+
+
+def get_plan_details(module, client, backup_plan_name: str):
+ backup_plan_id = _list_backup_plans(client, backup_plan_name)
+
+ if not backup_plan_id:
+ return []
+
+ try:
+ result = client.get_backup_plan(BackupPlanId=backup_plan_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}")
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_backup_plan = []
+
+ try:
+ resource = result.get("BackupPlanArn", None)
+ tag_dict = get_backup_resource_tags(module, client, resource)
+ result.update({"tags": tag_dict})
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get the backup plan tags")
+
+ snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags"))
+
+ # Remove AWS API response and add top-level plan name
+ for v in snaked_backup_plan:
+ if "response_metadata" in v:
+ del v["response_metadata"]
+ v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"]
+
+ return snaked_backup_plan
+
+
+def _list_backup_selections(client, module, plan_id):
+    # Return a flat list of all backup selections for the given plan,
+    # following NextToken pagination manually.
+    selections = []
+
+    # We can not use the paginator at the moment because it was introduced after boto3 version 1.22
+    # paginator = client.get_paginator("list_backup_selections")
+    # result = paginator.paginate(**params).build_full_result()["BackupSelectionsList"]
+
+    try:
+        response = client.list_backup_selections(BackupPlanId=plan_id)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to list AWS backup selections")
+
+    next_token = response.get("NextToken", None)
+
+    if next_token is None:
+        return response["BackupSelectionsList"]
+
+    selections.extend(response["BackupSelectionsList"])
+    while next_token:
+        try:
+            response = client.list_backup_selections(BackupPlanId=plan_id, NextToken=next_token)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to list AWS backup selections")
+        # extend (not append) so callers receive a flat list of selection dicts
+        selections.extend(response["BackupSelectionsList"])
+        next_token = response.get("NextToken")
+    return selections
+
+def _get_backup_selection(client, module, plan_id, selection_id):
+ try:
+ result = client.get_backup_selection(BackupPlanId=plan_id, SelectionId=selection_id)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg=f"Failed to describe selection {selection_id}")
+ return result or []
+
+
+def get_selection_details(module, client, plan_name: str, selection_name: Union[str, list]):
+ result = []
+
+ plan = get_plan_details(module, client, plan_name)
+
+ if not plan:
+ module.fail_json(msg=f"The backup plan {plan_name} does not exist. Please create one first.")
+
+ plan_id = plan[0]["backup_plan_id"]
+
+ selection_list = _list_backup_selections(client, module, plan_id)
+
+ if selection_name:
+ for selection in selection_list:
+ if isinstance(selection_name, list):
+ for name in selection_name:
+ if selection["SelectionName"] == name:
+ selection_id = selection["SelectionId"]
+ selection_info = _get_backup_selection(client, module, plan_id, selection_id)
+ result.append(selection_info)
+ if isinstance(selection_name, str):
+ if selection["SelectionName"] == selection_name:
+ selection_id = selection["SelectionId"]
+ result.append(_get_backup_selection(client, module, plan_id, selection_id))
+ break
+ else:
+ for selection in selection_list:
+ selection_id = selection["SelectionId"]
+ result.append(_get_backup_selection(client, module, plan_id, selection_id))
+
+ for v in result:
+ if "ResponseMetadata" in v:
+ del v["ResponseMetadata"]
+ if "BackupSelection" in v:
+ for backup_selection_key in v["BackupSelection"]:
+ v[backup_selection_key] = v["BackupSelection"][backup_selection_key]
+ del v["BackupSelection"]
+
+ return result
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
index c27214519..47281307e 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/batch.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
@@ -24,14 +26,11 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+
"""
This module adds shared support for Batch modules.
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
@@ -43,7 +42,7 @@ def cc(key):
:param key:
:return:
"""
- components = key.split('_')
+ components = key.split("_")
return components[0] + "".join([token.capitalize() for token in components[1:]])
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
index a8a014c20..858e4e593 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -31,9 +33,6 @@ A set of helper functions designed to help with initializing boto3/botocore
connections.
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import json
import os
import traceback
@@ -42,19 +41,43 @@ BOTO3_IMP_ERR = None
try:
import boto3
import botocore
+
HAS_BOTO3 = True
except ImportError:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
+try:
+ from packaging.version import Version
+
+ HAS_PACKAGING = True
+except ImportError:
+ HAS_PACKAGING = False
+
from ansible.module_utils._text import to_native
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six import binary_type
from ansible.module_utils.six import text_type
+from .common import get_collection_info
+from .exceptions import AnsibleBotocoreError
from .retries import AWSRetry
+MINIMUM_BOTOCORE_VERSION = "1.29.0"
+MINIMUM_BOTO3_VERSION = "1.26.0"
+
+
+def _get_user_agent_string():
+ info = get_collection_info()
+ result = f"APN/1.0 Ansible/{__version__}"
+ if info["name"]:
+ if info["version"] is not None:
+ result += f" {info['name']}/{info['version']}"
+ else:
+ result += f" {info['name']}"
+ return result
+
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
"""
@@ -68,13 +91,35 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError as e:
- module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
- botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
- module.fail_json(msg=to_native(e))
+ module.fail_json(
+ msg=f"Couldn't connect to AWS: {to_native(e)}",
+ )
+ except (
+ botocore.exceptions.ProfileNotFound,
+ botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError,
+ botocore.exceptions.ConfigParseError,
+ ) as e:
+ module.fail_json(
+ msg=to_native(e),
+ )
except botocore.exceptions.NoRegionError:
- module.fail_json(msg="The %s module requires a region and none was found in configuration, "
- "environment variables or module parameters" % module._name)
+ module.fail_json(
+ msg=f"The {module._name} module requires a region and none was found in configuration, "
+ "environment variables or module parameters",
+ )
+
+
+def _merge_botocore_config(config_a, config_b):
+ """
+ Merges the extra configuration options from config_b into config_a.
+ Supports both botocore.config.Config objects and dicts
+ """
+ if not config_b:
+ return config_a
+ if not isinstance(config_b, botocore.config.Config):
+ config_b = botocore.config.Config(**config_b)
+ return config_a.merge(config_b)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
@@ -82,22 +127,23 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par
Builds a boto3 resource/client connection cleanly wrapping the most common failures.
No exceptions are caught/handled.
"""
- profile = params.pop('profile_name', None)
-
- if conn_type not in ['both', 'resource', 'client']:
- raise ValueError('There is an issue in the calling code. You '
- 'must specify either both, resource, or client to '
- 'the conn_type parameter in the boto3_conn function '
- 'call')
+ profile = params.pop("profile_name", None)
+
+ if conn_type not in ["both", "resource", "client"]:
+ raise ValueError(
+ "There is an issue in the calling code. You "
+ "must specify either both, resource, or client to "
+ "the conn_type parameter in the boto3_conn function "
+ "call"
+ )
+ # default config with user agent
config = botocore.config.Config(
- user_agent_extra='Ansible/{0}'.format(__version__),
+ user_agent=_get_user_agent_string(),
)
- if params.get('config') is not None:
- config = config.merge(params.pop('config'))
- if params.get('aws_config') is not None:
- config = config.merge(params.pop('aws_config'))
+ for param in ("config", "aws_config"):
+ config = _merge_botocore_config(config, params.pop(param, None))
session = boto3.session.Session(
profile_name=profile,
@@ -105,13 +151,13 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par
enable_placebo(session)
- if conn_type == 'resource':
+ if conn_type == "resource":
return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
- elif conn_type == 'client':
+ elif conn_type == "client":
return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
else:
- client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
- resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ client = session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
return client, resource
@@ -127,106 +173,77 @@ def boto_exception(err):
:param err: Exception from boto
:return: Error message
"""
- if hasattr(err, 'error_message'):
+ if hasattr(err, "error_message"):
error = err.error_message
- elif hasattr(err, 'message'):
- error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ elif hasattr(err, "message"):
+ error = str(err.message) + " " + str(err) + " - " + str(type(err))
else:
- error = '%s: %s' % (Exception, err)
+ error = f"{Exception}: {err}"
return error
-def get_aws_region(module, boto3=None):
- region = module.params.get('region')
+def _aws_region(params):
+ region = params.get("region")
if region:
return region
if not HAS_BOTO3:
- module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+ raise AnsibleBotocoreError(message=missing_required_lib("boto3 and botocore"), exception=BOTO3_IMP_ERR)
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
try:
- profile_name = module.params.get('profile')
- return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ # Botocore doesn't like empty strings, make sure we default to None in the case of an empty
+ # string.
+ profile_name = params.get("profile") or None
+ return botocore.session.Session(profile=profile_name).get_config_variable("region")
except botocore.exceptions.ProfileNotFound:
return None
-def get_aws_connection_info(module, boto3=None):
-
- # Check module args for credentials, then check environment vars
- # access_key
-
- endpoint_url = module.params.get('endpoint_url')
- access_key = module.params.get('access_key')
- secret_key = module.params.get('secret_key')
- session_token = module.params.get('session_token')
- region = get_aws_region(module)
- profile_name = module.params.get('profile')
- validate_certs = module.params.get('validate_certs')
- ca_bundle = module.params.get('aws_ca_bundle')
- config = module.params.get('aws_config')
-
- # Only read the profile environment variables if we've *not* been passed
- # any credentials as parameters.
- if not profile_name and not access_key and not secret_key:
- if os.environ.get('AWS_PROFILE'):
- profile_name = os.environ.get('AWS_PROFILE')
- if os.environ.get('AWS_DEFAULT_PROFILE'):
- profile_name = os.environ.get('AWS_DEFAULT_PROFILE')
-
+def get_aws_region(module, boto3=None):
+ try:
+ return _aws_region(module.params)
+ except AnsibleBotocoreError as e:
+ if e.exception:
+ module.fail_json(msg=e.message, exception=e.exception)
+ else:
+ module.fail_json(msg=e.message)
+
+
+def _aws_connection_info(params):
+ endpoint_url = params.get("endpoint_url")
+ access_key = params.get("access_key")
+ secret_key = params.get("secret_key")
+ session_token = params.get("session_token")
+ region = _aws_region(params)
+ profile_name = params.get("profile")
+ validate_certs = params.get("validate_certs")
+ ca_bundle = params.get("aws_ca_bundle")
+ config = params.get("aws_config")
+
+ # Caught here so that they can be deliberately set to '' to avoid conflicts when environment
+ # variables are also being used
if profile_name and (access_key or secret_key or session_token):
- module.fail_json(msg="Passing both a profile and access tokens is not supported.")
+ raise AnsibleBotocoreError(message="Passing both a profile and access tokens is not supported.")
# Botocore doesn't like empty strings, make sure we default to None in the case of an empty
# string.
if not access_key:
- # AWS_ACCESS_KEY_ID is the one supported by the AWS CLI
- # AWS_ACCESS_KEY is to match up with our parameter name
- if os.environ.get('AWS_ACCESS_KEY_ID'):
- access_key = os.environ['AWS_ACCESS_KEY_ID']
- elif os.environ.get('AWS_ACCESS_KEY'):
- access_key = os.environ['AWS_ACCESS_KEY']
- # Deprecated - 'EC2' implies just EC2, but is global
- elif os.environ.get('EC2_ACCESS_KEY'):
- access_key = os.environ['EC2_ACCESS_KEY']
- else:
- # in case access_key came in as empty string
- access_key = None
-
+ access_key = None
if not secret_key:
- # AWS_SECRET_ACCESS_KEY is the one supported by the AWS CLI
- # AWS_SECRET_KEY is to match up with our parameter name
- if os.environ.get('AWS_SECRET_ACCESS_KEY'):
- secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
- elif os.environ.get('AWS_SECRET_KEY'):
- secret_key = os.environ['AWS_SECRET_KEY']
- # Deprecated - 'EC2' implies just EC2, but is global
- elif os.environ.get('EC2_SECRET_KEY'):
- secret_key = os.environ['EC2_SECRET_KEY']
- else:
- # in case secret_key came in as empty string
- secret_key = None
-
+ secret_key = None
if not session_token:
- # AWS_SESSION_TOKEN is supported by the AWS CLI
- if os.environ.get('AWS_SESSION_TOKEN'):
- session_token = os.environ['AWS_SESSION_TOKEN']
- # Deprecated - boto
- elif os.environ.get('AWS_SECURITY_TOKEN'):
- session_token = os.environ['AWS_SECURITY_TOKEN']
- # Deprecated - 'EC2' implies just EC2, but is global
- elif os.environ.get('EC2_SECURITY_TOKEN'):
- session_token = os.environ['EC2_SECURITY_TOKEN']
- else:
- # in case secret_token came in as empty string
- session_token = None
+ session_token = None
if profile_name:
- boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
- boto_params['profile_name'] = profile_name
+ boto_params = dict(
+ aws_access_key_id=None,
+ aws_secret_access_key=None,
+ aws_session_token=None,
+ profile_name=profile_name,
+ )
else:
boto_params = dict(
aws_access_key_id=access_key,
@@ -235,20 +252,30 @@ def get_aws_connection_info(module, boto3=None):
)
if validate_certs and ca_bundle:
- boto_params['verify'] = ca_bundle
+ boto_params["verify"] = ca_bundle
else:
- boto_params['verify'] = validate_certs
+ boto_params["verify"] = validate_certs
if config is not None:
- boto_params['aws_config'] = botocore.config.Config(**config)
+ boto_params["aws_config"] = botocore.config.Config(**config)
for param, value in boto_params.items():
if isinstance(value, binary_type):
- boto_params[param] = text_type(value, 'utf-8', 'strict')
+ boto_params[param] = text_type(value, "utf-8", "strict")
return region, endpoint_url, boto_params
+def get_aws_connection_info(module, boto3=None):
+ try:
+ return _aws_connection_info(module.params)
+ except AnsibleBotocoreError as e:
+ if e.exception:
+ module.fail_json(msg=e.message, exception=e.exception)
+ else:
+ module.fail_json(msg=e.message)
+
+
def _paginated_query(client, paginator_name, **params):
paginator = client.get_paginator(paginator_name)
result = paginator.paginate(**params).build_full_result()
@@ -282,10 +309,11 @@ def gather_sdk_versions():
"""
if not HAS_BOTO3:
return {}
- import boto3
- import botocore
- return dict(boto3_version=boto3.__version__,
- botocore_version=botocore.__version__)
+
+ return dict(
+ boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__,
+ )
def is_boto3_error_code(code, e=None):
@@ -302,14 +330,16 @@ def is_boto3_error_code(code, e=None):
# handle the generic error case for all other codes
"""
from botocore.exceptions import ClientError
+
if e is None:
import sys
+
dummy, e, dummy = sys.exc_info()
if not isinstance(code, list):
code = [code]
- if isinstance(e, ClientError) and e.response['Error']['Code'] in code:
+ if isinstance(e, ClientError) and e.response["Error"]["Code"] in code:
return ClientError
- return type('NeverEverRaisedException', (Exception,), {})
+ return type("NeverEverRaisedException", (Exception,), {})
def is_boto3_error_message(msg, e=None):
@@ -326,12 +356,14 @@ def is_boto3_error_message(msg, e=None):
# handle the generic error case for all other codes
"""
from botocore.exceptions import ClientError
+
if e is None:
import sys
+
dummy, e, dummy = sys.exc_info()
- if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
+ if isinstance(e, ClientError) and msg in e.response["Error"]["Message"]:
return ClientError
- return type('NeverEverRaisedException', (Exception,), {})
+ return type("NeverEverRaisedException", (Exception,), {})
def get_boto3_client_method_parameters(client, method_name, required=False):
@@ -348,7 +380,7 @@ def get_boto3_client_method_parameters(client, method_name, required=False):
# Used by normalize_boto3_result
def _boto3_handler(obj):
- if hasattr(obj, 'isoformat'):
+ if hasattr(obj, "isoformat"):
return obj.isoformat()
else:
return obj
@@ -371,6 +403,7 @@ def enable_placebo(session):
"""
if "_ANSIBLE_PLACEBO_RECORD" in os.environ:
import placebo
+
existing_entries = os.listdir(os.environ["_ANSIBLE_PLACEBO_RECORD"])
idx = len(existing_entries)
data_path = f"{os.environ['_ANSIBLE_PLACEBO_RECORD']}/{idx}"
@@ -379,10 +412,12 @@ def enable_placebo(session):
pill.record()
if "_ANSIBLE_PLACEBO_REPLAY" in os.environ:
import shutil
+
import placebo
+
existing_entries = sorted([int(i) for i in os.listdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])])
idx = str(existing_entries[0])
- data_path = os.environ['_ANSIBLE_PLACEBO_REPLAY'] + "/" + idx
+ data_path = os.environ["_ANSIBLE_PLACEBO_REPLAY"] + "/" + idx
try:
shutil.rmtree("_tmp")
except FileNotFoundError:
@@ -392,3 +427,73 @@ def enable_placebo(session):
os.rmdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])
pill = placebo.attach(session, data_path="_tmp")
pill.playback()
+
+
+def check_sdk_version_supported(botocore_version=None, boto3_version=None, warn=None):
+ """Checks to see if the available boto3 / botocore versions are supported
+ args:
+ botocore_version: (str) overrides the minimum version of botocore supported by the collection
+ boto3_version: (str) overrides the minimum version of boto3 supported by the collection
+ warn: (Callable) invoked with a string message if boto3/botocore are less than the
+ supported versions
+ raises:
+ AnsibleBotocoreError - If botocore/boto3 is missing
+ returns
+ False if boto3 or botocore is less than the minimum supported versions
+        True if boto3 and botocore are greater than or equal to the minimum supported versions
+ """
+
+ botocore_version = botocore_version or MINIMUM_BOTOCORE_VERSION
+ boto3_version = boto3_version or MINIMUM_BOTO3_VERSION
+
+ if not HAS_BOTO3:
+ raise AnsibleBotocoreError(message=missing_required_lib("botocore and boto3"))
+
+ supported = True
+
+ if not HAS_PACKAGING:
+ if warn:
+ warn("packaging.version Python module not installed, unable to check AWS SDK versions")
+ return True
+ if not botocore_at_least(botocore_version):
+ supported = False
+ if warn:
+ warn(f"botocore < {MINIMUM_BOTOCORE_VERSION} is not supported or tested. Some features may not work.")
+ if not boto3_at_least(boto3_version):
+ supported = False
+ if warn:
+ warn(f"boto3 < {MINIMUM_BOTO3_VERSION} is not supported or tested. Some features may not work.")
+
+ return supported
+
+
+def _version_at_least(a, b):
+ if not HAS_PACKAGING:
+ return True
+ return Version(a) >= Version(b)
+
+
+def boto3_at_least(desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = gather_sdk_versions()
+ return _version_at_least(existing["boto3_version"], desired)
+
+
+def botocore_at_least(desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = gather_sdk_versions()
+ return _version_at_least(existing["botocore_version"], desired)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
index e690c0a86..4b2775cb3 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 Ansible Project
#
# This code is part of Ansible, but is an independent component.
@@ -24,15 +26,10 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-import time
import functools
import random
-import ansible.module_utils.common.warnings as ansible_warnings
+import time
class BackoffIterator:
@@ -62,7 +59,9 @@ class BackoffIterator:
return return_value
-def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class):
+def _retry_func(
+ func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class
+):
counter = 0
for sleep_time in sleep_time_generator:
try:
@@ -108,6 +107,7 @@ class CloudRetry:
else:
# iterable
return True
+
return _is_iterable() and response_code in catch_extra_error_codes
@classmethod
@@ -125,7 +125,9 @@ class CloudRetry:
status_code_from_except_f=status_code_from_exception,
base_class=cls.base_class,
)
+
return _retry_wrapper
+
return retry_decorator
@classmethod
@@ -179,35 +181,3 @@ class CloudRetry:
catch_extra_error_codes=catch_extra_error_codes,
sleep_time_generator=sleep_time_generator,
)
-
- @classmethod
- def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
- """
- Wrap a callable with retry behavior.
- Developers should use CloudRetry.exponential_backoff instead.
- This method has been deprecated and will be removed in release 6.0.0, consider using exponential_backoff method instead.
- Args:
- retries (int): Number of times to retry a failed request before giving up
- default=10
- delay (int or float): Initial delay between retries in seconds
- default=3
- backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry
- default=1.1
- catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry
- default=None
- Returns:
- Callable: A generator that calls the decorated function using an exponential backoff.
- """
- # This won't emit a warning (we don't have the context available to us), but will trigger
- # sanity failures as we prepare for 6.0.0
- ansible_warnings.deprecate(
- 'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead',
- version='6.0.0', collection_name='amazon.aws')
-
- return cls.exponential_backoff(
- retries=tries,
- delay=delay,
- backoff=backoff,
- max_delay=None,
- catch_extra_error_codes=catch_extra_error_codes,
- )
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
index c628bff14..342adc82d 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
@@ -1,20 +1,8 @@
# -*- coding: utf-8 -*-
-#
+
# Copyright (c) 2017 Willem van Ketwich
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
# Author:
# - Willem van Ketwich <willem@vanketwich.com.au>
#
@@ -22,116 +10,147 @@
# - cloudfront_distribution
# - cloudfront_invalidation
# - cloudfront_origin_access_identity
+
"""
Common cloudfront facts shared between modules
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from functools import partial
try:
import botocore
except ImportError:
pass
-from .ec2 import AWSRetry
-from .ec2 import boto3_tag_list_to_ansible_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from .retries import AWSRetry
+from .tagging import boto3_tag_list_to_ansible_dict
-class CloudFrontFactsServiceManager(object):
- """Handles CloudFront Facts Services"""
- def __init__(self, module):
- self.module = module
- self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+class CloudFrontFactsServiceManagerFailure(Exception):
+ pass
- def get_distribution(self, distribution_id):
- try:
- return self.client.get_distribution(Id=distribution_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing distribution")
- def get_distribution_config(self, distribution_id):
- try:
- return self.client.get_distribution_config(Id=distribution_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing distribution configuration")
+def cloudfront_facts_keyed_list_helper(list_to_key):
+ result = dict()
+ for item in list_to_key:
+ distribution_id = item["Id"]
+ if "Items" in item["Aliases"]:
+ result.update({alias: item for alias in item["Aliases"]["Items"]})
+ result.update({distribution_id: item})
+ return result
- def get_origin_access_identity(self, origin_access_identity_id):
- try:
- return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity")
- def get_origin_access_identity_config(self, origin_access_identity_id):
- try:
- return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
+@AWSRetry.jittered_backoff()
+def _cloudfront_paginate_build_full_result(client, client_method, **kwargs):
+ paginator = client.get_paginator(client_method)
+ return paginator.paginate(**kwargs).build_full_result()
- def get_invalidation(self, distribution_id, invalidation_id):
- try:
- return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing invalidation")
- def get_streaming_distribution(self, distribution_id):
- try:
- return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- def get_streaming_distribution_config(self, distribution_id):
- try:
- return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- def list_origin_access_identities(self):
- try:
- paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
- result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
- return result.get('Items', [])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
-
- def list_distributions(self, keyed=True):
- try:
- paginator = self.client.get_paginator('list_distributions')
- result = paginator.paginate().build_full_result().get('DistributionList', {})
- distribution_list = result.get('Items', [])
- if not keyed:
- return distribution_list
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing distributions")
-
- def list_distributions_by_web_acl_id(self, web_acl_id):
- try:
- result = self.client.list_distributions_by_web_acl_id(WebAclId=web_acl_id, aws_retry=True)
- distribution_list = result.get('DistributionList', {}).get('Items', [])
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
+class CloudFrontFactsServiceManager:
+ """Handles CloudFront Facts Services"""
- def list_invalidations(self, distribution_id):
- try:
- paginator = self.client.get_paginator('list_invalidations')
- result = paginator.paginate(DistributionId=distribution_id).build_full_result()
- return result.get('InvalidationList', {}).get('Items', [])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing invalidations")
+ CLOUDFRONT_CLIENT_API_MAPPING = {
+ "get_distribution": {
+ "error": "Error describing distribution",
+ },
+ "get_distribution_config": {
+ "error": "Error describing distribution configuration",
+ },
+ "get_origin_access_identity": {
+ "error": "Error describing origin access identity",
+ "client_api": "get_cloud_front_origin_access_identity",
+ },
+ "get_origin_access_identity_config": {
+ "error": "Error describing origin access identity configuration",
+ "client_api": "get_cloud_front_origin_access_identity_config",
+ },
+ "get_streaming_distribution": {
+ "error": "Error describing streaming distribution",
+ },
+ "get_streaming_distribution_config": {
+ "error": "Error describing streaming distribution",
+ },
+ "get_invalidation": {
+ "error": "Error describing invalidation",
+ },
+ "list_distributions_by_web_acl_id": {
+ "error": "Error listing distributions by web acl id",
+ "post_process": lambda x: cloudfront_facts_keyed_list_helper(
+ x.get("DistributionList", {}).get("Items", [])
+ ),
+ },
+ }
+
+ CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING = {
+ "list_origin_access_identities": {
+ "error": "Error listing cloud front origin access identities",
+ "client_api": "list_cloud_front_origin_access_identities",
+ "key": "CloudFrontOriginAccessIdentityList",
+ },
+ "list_distributions": {
+ "error": "Error listing distributions",
+ "key": "DistributionList",
+ "keyed": True,
+ },
+ "list_invalidations": {"error": "Error listing invalidations", "key": "InvalidationList"},
+ "list_streaming_distributions": {
+ "error": "Error listing streaming distributions",
+ "key": "StreamingDistributionList",
+ "keyed": True,
+ },
+ }
- def list_streaming_distributions(self, keyed=True):
- try:
- paginator = self.client.get_paginator('list_streaming_distributions')
- result = paginator.paginate().build_full_result()
- streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
- if not keyed:
- return streaming_distribution_list
- return self.keyed_list_helper(streaming_distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing streaming distributions")
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff())
+
+ def describe_cloudfront_property(self, client_method, error, post_process, **kwargs):
+ fail_if_error = kwargs.pop("fail_if_error", True)
+ try:
+ method = getattr(self.client, client_method)
+ api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True)
+ result = method(aws_retry=True, **api_kwargs)
+ result.pop("ResponseMetadata", None)
+ if post_process:
+ result = post_process(result)
+ return result
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not fail_if_error:
+ raise
+ self.module.fail_json_aws(e, msg=error)
+
+ def paginate_list_cloudfront_property(self, client_method, key, default_keyed, error, **kwargs):
+ fail_if_error = kwargs.pop("fail_if_error", True)
+ try:
+ keyed = kwargs.pop("keyed", default_keyed)
+ api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True)
+ result = _cloudfront_paginate_build_full_result(self.client, client_method, **api_kwargs)
+ items = result.get(key, {}).get("Items", [])
+ if keyed:
+ items = cloudfront_facts_keyed_list_helper(items)
+ return items
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not fail_if_error:
+ raise
+ self.module.fail_json_aws(e, msg=error)
+
+ def __getattr__(self, name):
+ if name in self.CLOUDFRONT_CLIENT_API_MAPPING:
+ client_method = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("client_api", name)
+ error = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("error", "")
+ post_process = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("post_process")
+ return partial(self.describe_cloudfront_property, client_method, error, post_process)
+
+ elif name in self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING:
+ client_method = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("client_api", name)
+ error = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("error", "")
+ key = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("key")
+ keyed = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("keyed", False)
+ return partial(self.paginate_list_cloudfront_property, client_method, key, keyed, error)
+
+ raise CloudFrontFactsServiceManagerFailure(f"Method {name} is not currently supported")
def summary(self):
summary_dict = {}
@@ -142,36 +161,38 @@ class CloudFrontFactsServiceManager(object):
def summary_get_origin_access_identity_list(self):
try:
- origin_access_identity_list = {'origin_access_identities': []}
- origin_access_identities = self.list_origin_access_identities()
- for origin_access_identity in origin_access_identities:
- oai_id = origin_access_identity['Id']
+ origin_access_identities = []
+ for origin_access_identity in self.list_origin_access_identities():
+ oai_id = origin_access_identity["Id"]
oai_full_response = self.get_origin_access_identity(oai_id)
- oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
- origin_access_identity_list['origin_access_identities'].append(oai_summary)
- return origin_access_identity_list
+ oai_summary = {"Id": oai_id, "ETag": oai_full_response["ETag"]}
+ origin_access_identities.append(oai_summary)
+ return {"origin_access_identities": origin_access_identities}
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
+ def list_resource_tags(self, resource_arn):
+ return self.client.list_tags_for_resource(Resource=resource_arn, aws_retry=True)
+
def summary_get_distribution_list(self, streaming=False):
try:
- list_name = 'streaming_distributions' if streaming else 'distributions'
- key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ list_name = "streaming_distributions" if streaming else "distributions"
+ key_list = ["Id", "ARN", "Status", "LastModifiedTime", "DomainName", "Comment", "PriceClass", "Enabled"]
distribution_list = {list_name: []}
- distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
+ distributions = (
+ self.list_streaming_distributions(keyed=False) if streaming else self.list_distributions(keyed=False)
+ )
for dist in distributions:
- temp_distribution = {}
- for key_name in key_list:
- temp_distribution[key_name] = dist[key_name]
- temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', []))
- temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
+ temp_distribution = {k: dist[k] for k in key_list}
+ temp_distribution["Aliases"] = list(dist["Aliases"].get("Items", []))
+ temp_distribution["ETag"] = self.get_etag_from_distribution_id(dist["Id"], streaming)
if not streaming:
- temp_distribution['WebACLId'] = dist['WebACLId']
- invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
+ temp_distribution["WebACLId"] = dist["WebACLId"]
+ invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist["Id"])
if invalidation_ids:
- temp_distribution['Invalidations'] = invalidation_ids
- resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True)
- temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
+ temp_distribution["Invalidations"] = invalidation_ids
+ resource_tags = self.list_resource_tags(dist["ARN"])
+ temp_distribution["Tags"] = boto3_tag_list_to_ansible_dict(resource_tags["Tags"].get("Items", []))
distribution_list[list_name].append(temp_distribution)
return distribution_list
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -180,50 +201,32 @@ class CloudFrontFactsServiceManager(object):
def get_etag_from_distribution_id(self, distribution_id, streaming):
distribution = {}
if not streaming:
- distribution = self.get_distribution(distribution_id)
+ distribution = self.get_distribution(id=distribution_id)
else:
- distribution = self.get_streaming_distribution(distribution_id)
- return distribution['ETag']
+ distribution = self.get_streaming_distribution(id=distribution_id)
+ return distribution["ETag"]
def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
try:
- invalidation_ids = []
- invalidations = self.list_invalidations(distribution_id)
- for invalidation in invalidations:
- invalidation_ids.append(invalidation['Id'])
- return invalidation_ids
+ return list(map(lambda x: x["Id"], self.list_invalidations(distribution_id=distribution_id)))
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
def get_distribution_id_from_domain_name(self, domain_name):
try:
distribution_id = ""
- distributions = self.list_distributions(False)
- distributions += self.list_streaming_distributions(False)
+ distributions = self.list_distributions(keyed=False)
+ distributions += self.list_streaming_distributions(keyed=False)
for dist in distributions:
- if 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- if str(alias).lower() == domain_name.lower():
- distribution_id = dist['Id']
- break
+ if any(str(alias).lower() == domain_name.lower() for alias in dist["Aliases"].get("Items", [])):
+ distribution_id = dist["Id"]
return distribution_id
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
def get_aliases_from_distribution_id(self, distribution_id):
try:
- distribution = self.get_distribution(distribution_id)
- return distribution['DistributionConfig']['Aliases'].get('Items', [])
+ distribution = self.get_distribution(id=distribution_id)
+ return distribution["Distribution"]["DistributionConfig"]["Aliases"].get("Items", [])
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
-
- def keyed_list_helper(self, list_to_key):
- keyed_list = dict()
- for item in list_to_key:
- distribution_id = item['Id']
- if 'Items' in item['Aliases']:
- aliases = item['Aliases']['Items']
- for alias in aliases:
- keyed_list.update({alias: item})
- keyed_list.update({distribution_id: item})
- return keyed_list
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/common.py b/ansible_collections/amazon/aws/plugins/module_utils/common.py
new file mode 100644
index 000000000..673915725
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/common.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+AMAZON_AWS_COLLECTION_NAME = "amazon.aws"
+AMAZON_AWS_COLLECTION_VERSION = "7.4.0"
+
+
+_collection_info_context = {
+ "name": AMAZON_AWS_COLLECTION_NAME,
+ "version": AMAZON_AWS_COLLECTION_VERSION,
+}
+
+
+def set_collection_info(collection_name=None, collection_version=None):
+ if collection_name:
+ _collection_info_context["name"] = collection_name
+ if collection_version:
+ _collection_info_context["version"] = collection_version
+
+
+def get_collection_info():
+ return _collection_info_context
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py
index bfd7fe101..44fd1d80b 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/core.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py
@@ -1,27 +1,14 @@
-#
+# -*- coding: utf-8 -*-
+
# Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""This module adds shared support for generic Amazon AWS modules
In order to use this module, include it as part of a custom
module as shown below.
- from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean
mutually_exclusive=list1, required_together=list2)
@@ -50,19 +37,19 @@ The call will be retried the specified number of times, so the calling functions
don't need to be wrapped in the backoff decorator.
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
from .arn import parse_aws_arn # pylint: disable=unused-import
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
from .botocore import HAS_BOTO3 # pylint: disable=unused-import
+from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import
from .botocore import is_boto3_error_code # pylint: disable=unused-import
from .botocore import is_boto3_error_message # pylint: disable=unused-import
-from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import
from .botocore import normalize_boto3_result # pylint: disable=unused-import
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions
+from .exceptions import AnsibleAWSError # pylint: disable=unused-import
+
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
from .modules import AnsibleAWSModule # pylint: disable=unused-import
@@ -70,8 +57,4 @@ from .modules import AnsibleAWSModule # pylint: disable=unused-import
from .transformation import scrub_none_parameters # pylint: disable=unused-import
# We will also export HAS_BOTO3 so end user modules can use it.
-__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message')
-
-
-class AnsibleAWSError(Exception):
- pass
+__all__ = ("AnsibleAWSModule", "HAS_BOTO3", "is_boto3_error_code", "is_boto3_error_message")
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
index abcbcfd23..8fdaf94b8 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
@@ -24,14 +26,11 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+
"""
This module adds shared support for Direct Connect modules.
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import traceback
try:
@@ -39,7 +38,7 @@ try:
except ImportError:
pass
-from .ec2 import AWSRetry
+from .retries import AWSRetry
class DirectConnectError(Exception):
@@ -53,37 +52,41 @@ def delete_connection(client, connection_id):
try:
AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to delete DirectConnection {connection_id}.",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
def associate_connection_and_lag(client, connection_id, lag_id):
try:
- AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id,
- lagId=lag_id)
+ AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, lagId=lag_id)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
- " with link aggregation group {1}.".format(connection_id, lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to associate Direct Connect connection {connection_id} with link aggregation group {lag_id}.",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
def disassociate_connection_and_lag(client, connection_id, lag_id):
try:
- AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id,
- lagId=lag_id)
+ AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, lagId=lag_id)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
- " from link aggregation group {1}.".format(connection_id, lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to disassociate Direct Connect connection {connection_id} from link aggregation group {lag_id}.",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
def delete_virtual_interface(client, virtual_interface):
try:
AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Could not delete virtual interface {virtual_interface}",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
index 817c12298..afe8208f5 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -35,21 +37,19 @@ lived here. Most of these functions were not specific to EC2, they ended
up in this module because "that's where the AWS code was" (originally).
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import re
from ansible.module_utils.ansible_release import __version__
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six import integer_types
+
# Used to live here, moved into ansible.module_utils.common.dict_transformations
from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import
from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import
+from ansible.module_utils.six import integer_types
+from ansible.module_utils.six import string_types
-# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.arn
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
@@ -57,19 +57,26 @@ from .botocore import HAS_BOTO3 # pylint: disable=unused-import
from .botocore import boto3_conn # pylint: disable=unused-import
from .botocore import boto3_inventory_conn # pylint: disable=unused-import
from .botocore import boto_exception # pylint: disable=unused-import
-from .botocore import get_aws_region # pylint: disable=unused-import
from .botocore import get_aws_connection_info # pylint: disable=unused-import
-
+from .botocore import get_aws_region # pylint: disable=unused-import
from .botocore import paginated_query_with_retries
-# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore
-from .core import AnsibleAWSError # pylint: disable=unused-import
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions
+from .exceptions import AnsibleAWSError # pylint: disable=unused-import
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules
# The names have been changed in .modules to better reflect their applicability.
from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import
from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy
+from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
+from .policy import compare_policies # pylint: disable=unused-import
+from .policy import sort_json_policy_dict # pylint: disable=unused-import
+
+# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries
+from .retries import AWSRetry # pylint: disable=unused-import
+
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging
from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import
from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import
@@ -79,14 +86,6 @@ from .tagging import compare_aws_tags # pylint: disable=unused-import
from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import
from .transformation import map_complex_type # pylint: disable=unused-import
-# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.policy
-from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
-from .policy import compare_policies # pylint: disable=unused-import
-from .policy import sort_json_policy_dict # pylint: disable=unused-import
-
-# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.retries
-from .retries import AWSRetry # pylint: disable=unused-import
-
try:
import botocore
except ImportError:
@@ -94,18 +93,17 @@ except ImportError:
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None):
-
- """ Return list of security group IDs from security group names. Note that security group names are not unique
- across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
- will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
- a try block
- """
+ """Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
+ a try block
+ """
def get_sg_name(sg, boto3=None):
- return str(sg['GroupName'])
+ return str(sg["GroupName"])
def get_sg_id(sg, boto3=None):
- return str(sg['GroupId'])
+ return str(sg["GroupId"])
sec_group_id_list = []
@@ -116,25 +114,25 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id
if vpc_id:
filters = [
{
- 'Name': 'vpc-id',
- 'Values': [
+ "Name": "vpc-id",
+ "Values": [
vpc_id,
- ]
+ ],
}
]
- all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)["SecurityGroups"]
else:
- all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+ all_sec_groups = ec2_connection.describe_security_groups()["SecurityGroups"]
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
- sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
- still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ sec_group_id_list[:] = [sg for sg in unmatched if re.match("sg-[a-fA-F0-9]+$", sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match("sg-[a-fA-F0-9]+$", sg)]
if len(still_unmatched) > 0:
- raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+ raise ValueError(f"The following group names are not valid: {', '.join(still_unmatched)}")
sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list]
@@ -162,13 +160,11 @@ def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None):
try:
tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
- AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
- client.create_tags
- )(
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.create_tags)(
Resources=[resource_id], Tags=tags_to_add
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id))
+ module.fail_json_aws(e, msg=f"Unable to add tags {tags_to_set} to {resource_id}")
return True
@@ -194,13 +190,11 @@ def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None
tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
try:
- AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(
- client.delete_tags
- )(
+ AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.delete_tags)(
Resources=[resource_id], Tags=tags_to_remove
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id))
+ module.fail_json_aws(e, msg=f"Unable to delete tags {tags_to_unset} from {resource_id}")
return True
@@ -214,9 +208,9 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod
:param resource_type: the type of the resource
:param retry_codes: additional boto3 error codes to trigger retries
"""
- filters = {'resource-id': resource_id}
+ filters = {"resource-id": resource_id}
if resource_type:
- filters['resource-type'] = resource_type
+ filters["resource-type"] = resource_type
filters = ansible_dict_to_boto3_filter_list(filters)
if not retry_codes:
@@ -224,11 +218,12 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod
try:
retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)
- results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator,
- Filters=filters)
- return boto3_tag_list_to_ansible_dict(results.get('Tags', None))
+ results = paginated_query_with_retries(
+ client, "describe_tags", retry_decorator=retry_decorator, Filters=filters
+ )
+ return boto3_tag_list_to_ansible_dict(results.get("Tags", None))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id))
+ module.fail_json_aws(e, msg=f"Failed to describe tags for EC2 Resource: {resource_id}")
def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None):
@@ -297,14 +292,23 @@ def normalize_ec2_vpc_dhcp_config(option_config):
for config_item in option_config:
# Handle single value keys
- if config_item['Key'] == 'netbios-node-type':
- if isinstance(config_item['Values'], integer_types):
- config_data['netbios-node-type'] = str((config_item['Values']))
- elif isinstance(config_item['Values'], list):
- config_data['netbios-node-type'] = str((config_item['Values'][0]['Value']))
+ if config_item["Key"] == "netbios-node-type":
+ if isinstance(config_item["Values"], integer_types):
+ config_data["netbios-node-type"] = str((config_item["Values"]))
+ elif isinstance(config_item["Values"], list):
+ config_data["netbios-node-type"] = str((config_item["Values"][0]["Value"]))
# Handle actual lists of values
- for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']:
- if config_item['Key'] == option:
- config_data[option] = [val['Value'] for val in config_item['Values']]
+ for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers"]:
+ if config_item["Key"] == option:
+ config_data[option] = [val["Value"] for val in config_item["Values"]]
return config_data
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def helper_describe_import_image_tasks(client, module, **params):
+ try:
+ paginator = client.get_paginator("describe_import_image_tasks")
+ return paginator.paginate(**params).build_full_result()["ImportImageTasks"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe the import image")
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
index 218052d2f..8dc5eabfe 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
@@ -1,16 +1,16 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass
-from .core import is_boto3_error_code
-from .ec2 import AWSRetry
+from .botocore import is_boto3_error_code
+from .retries import AWSRetry
def get_elb(connection, module, elb_name):
@@ -40,9 +40,9 @@ def _get_elb(connection, module, elb_name):
"""
try:
- load_balancer_paginator = connection.get_paginator('describe_load_balancers')
- return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
- except is_boto3_error_code('LoadBalancerNotFound'):
+ load_balancer_paginator = connection.get_paginator("describe_load_balancers")
+ return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())["LoadBalancers"][0]
+ except is_boto3_error_code("LoadBalancerNotFound"):
return None
@@ -58,15 +58,17 @@ def get_elb_listener(connection, module, elb_arn, listener_port):
"""
try:
- listener_paginator = connection.get_paginator('describe_listeners')
- listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
+ listener_paginator = connection.get_paginator("describe_listeners")
+ listeners = (
+ AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result()
+ )["Listeners"]
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
l = None
for listener in listeners:
- if listener['Port'] == listener_port:
+ if listener["Port"] == listener_port:
l = listener
break
@@ -84,7 +86,7 @@ def get_elb_listener_rules(connection, module, listener_arn):
"""
try:
- return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
+ return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)["Rules"]
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
@@ -104,6 +106,6 @@ def convert_tg_name_to_arn(connection, module, tg_name):
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
- tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+ tg_arn = response["TargetGroups"][0]["TargetGroupArn"]
return tg_arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
index 04f6114e1..758eb9a33 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -1,36 +1,36 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import traceback
from copy import deepcopy
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass
-from .ec2 import AWSRetry
-from .ec2 import ansible_dict_to_boto3_tag_list
-from .ec2 import boto3_tag_list_to_ansible_dict
from .ec2 import get_ec2_security_group_ids_from_names
from .elb_utils import convert_tg_name_to_arn
from .elb_utils import get_elb
from .elb_utils import get_elb_listener
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
from .waiters import get_waiter
def _simple_forward_config_arn(config, parent_arn):
config = deepcopy(config)
- stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False})
+ stickiness = config.pop("TargetGroupStickinessConfig", {"Enabled": False})
# Stickiness options set, non default value
- if stickiness != {'Enabled': False}:
+ if stickiness != {"Enabled": False}:
return False
- target_groups = config.pop('TargetGroups', [])
+ target_groups = config.pop("TargetGroups", [])
# non-default config left over, probably invalid
if config:
@@ -45,9 +45,9 @@ def _simple_forward_config_arn(config, parent_arn):
target_group = target_groups[0]
# We don't care about the weight with a single TG
- target_group.pop('Weight', None)
+ target_group.pop("Weight", None)
- target_group_arn = target_group.pop('TargetGroupArn', None)
+ target_group_arn = target_group.pop("TargetGroupArn", None)
# non-default config left over
if target_group:
@@ -75,12 +75,12 @@ def _prune_ForwardConfig(action):
Drops a redundant ForwardConfig where TargetGroupARN has already been set.
(So we can perform comparisons)
"""
- if action.get('Type', "") != 'forward':
+ if action.get("Type", "") != "forward":
return action
if "ForwardConfig" not in action:
return action
- parent_arn = action.get('TargetGroupArn', None)
+ parent_arn = action.get("TargetGroupArn", None)
arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn)
if not arn:
return action
@@ -95,17 +95,23 @@ def _prune_ForwardConfig(action):
# remove the client secret if UseExistingClientSecret, because aws won't return it
# add default values when they are not requested
def _prune_secret(action):
- if action['Type'] != 'authenticate-oidc':
+ if action["Type"] != "authenticate-oidc":
return action
- if not action['AuthenticateOidcConfig'].get('Scope', False):
- action['AuthenticateOidcConfig']['Scope'] = 'openid'
+ if not action["AuthenticateOidcConfig"].get("Scope", False):
+ action["AuthenticateOidcConfig"]["Scope"] = "openid"
+
+ if not action["AuthenticateOidcConfig"].get("SessionTimeout", False):
+ action["AuthenticateOidcConfig"]["SessionTimeout"] = 604800
- if not action['AuthenticateOidcConfig'].get('SessionTimeout', False):
- action['AuthenticateOidcConfig']['SessionTimeout'] = 604800
+ if action["AuthenticateOidcConfig"].get("UseExistingClientSecret", False):
+ action["AuthenticateOidcConfig"].pop("ClientSecret", None)
- if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False):
- action['AuthenticateOidcConfig'].pop('ClientSecret', None)
+ if not action["AuthenticateOidcConfig"].get("OnUnauthenticatedRequest", False):
+ action["AuthenticateOidcConfig"]["OnUnauthenticatedRequest"] = "authenticate"
+
+ if not action["AuthenticateOidcConfig"].get("SessionCookieName", False):
+ action["AuthenticateOidcConfig"]["SessionCookieName"] = "AWSELBAuthSessionCookie"
return action
@@ -113,22 +119,20 @@ def _prune_secret(action):
# while AWS api also won't return UseExistingClientSecret key
# it must be added, because it's requested and compared
def _append_use_existing_client_secretn(action):
- if action['Type'] != 'authenticate-oidc':
+ if action["Type"] != "authenticate-oidc":
return action
- action['AuthenticateOidcConfig']['UseExistingClientSecret'] = True
+ action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = True
return action
def _sort_actions(actions):
- return sorted(actions, key=lambda x: x.get('Order', 0))
-
+ return sorted(actions, key=lambda x: x.get("Order", 0))
-class ElasticLoadBalancerV2(object):
+class ElasticLoadBalancerV2:
def __init__(self, connection, module):
-
self.connection = connection
self.module = module
self.changed = False
@@ -152,7 +156,7 @@ class ElasticLoadBalancerV2(object):
if self.elb is not None:
self.elb_attributes = self.get_elb_attributes()
self.elb_ip_addr_type = self.get_elb_ip_address_type()
- self.elb['tags'] = self.get_elb_tags()
+ self.elb["tags"] = self.get_elb_tags()
else:
self.elb_attributes = None
@@ -168,8 +172,8 @@ class ElasticLoadBalancerV2(object):
return
waiter_names = {
- 'ipv4': 'load_balancer_ip_address_type_ipv4',
- 'dualstack': 'load_balancer_ip_address_type_dualstack',
+ "ipv4": "load_balancer_ip_address_type_ipv4",
+ "dualstack": "load_balancer_ip_address_type_dualstack",
}
if ip_type not in waiter_names:
return
@@ -192,7 +196,7 @@ class ElasticLoadBalancerV2(object):
return
try:
- waiter = get_waiter(self.connection, 'load_balancer_available')
+ waiter = get_waiter(self.connection, "load_balancer_available")
waiter.wait(LoadBalancerArns=[elb_arn])
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -209,7 +213,7 @@ class ElasticLoadBalancerV2(object):
return
try:
- waiter = get_waiter(self.connection, 'load_balancers_deleted')
+ waiter = get_waiter(self.connection, "load_balancers_deleted")
waiter.wait(LoadBalancerArns=[elb_arn])
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -222,16 +226,16 @@ class ElasticLoadBalancerV2(object):
"""
try:
- attr_list = AWSRetry.jittered_backoff()(
- self.connection.describe_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
+ attr_list = AWSRetry.jittered_backoff()(self.connection.describe_load_balancer_attributes)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"]
+ )["Attributes"]
elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
# Replace '.' with '_' in attribute key names to make it more Ansibley
- return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
+ return dict((k.replace(".", "_"), v) for k, v in elb_attributes.items())
def get_elb_ip_address_type(self):
"""
@@ -240,7 +244,7 @@ class ElasticLoadBalancerV2(object):
:return:
"""
- return self.elb.get('IpAddressType', None)
+ return self.elb.get("IpAddressType", None)
def update_elb_attributes(self):
"""
@@ -257,9 +261,9 @@ class ElasticLoadBalancerV2(object):
"""
try:
- return AWSRetry.jittered_backoff()(
- self.connection.describe_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
+ return AWSRetry.jittered_backoff()(self.connection.describe_tags)(
+ ResourceArns=[self.elb["LoadBalancerArn"]]
+ )["TagDescriptions"][0]["Tags"]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -271,9 +275,9 @@ class ElasticLoadBalancerV2(object):
"""
try:
- AWSRetry.jittered_backoff()(
- self.connection.remove_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
+ AWSRetry.jittered_backoff()(self.connection.remove_tags)(
+ ResourceArns=[self.elb["LoadBalancerArn"]], TagKeys=tags_to_delete
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -287,9 +291,9 @@ class ElasticLoadBalancerV2(object):
"""
try:
- AWSRetry.jittered_backoff()(
- self.connection.add_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
+ AWSRetry.jittered_backoff()(self.connection.add_tags)(
+ ResourceArns=[self.elb["LoadBalancerArn"]], Tags=self.tags
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -302,13 +306,13 @@ class ElasticLoadBalancerV2(object):
"""
try:
- AWSRetry.jittered_backoff()(
- self.connection.delete_load_balancer
- )(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"]
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
- self.wait_for_deletion(self.elb['LoadBalancerArn'])
+ self.wait_for_deletion(self.elb["LoadBalancerArn"])
self.changed = True
@@ -326,7 +330,7 @@ class ElasticLoadBalancerV2(object):
if self.subnets is not None:
# Convert subnets to subnet_mappings format for comparison
for subnet in self.subnets:
- subnet_mappings.append({'SubnetId': subnet})
+ subnet_mappings.append({"SubnetId": subnet})
if self.subnet_mappings is not None:
# Use this directly since we're comparing as a mapping
@@ -334,16 +338,18 @@ class ElasticLoadBalancerV2(object):
# Build a subnet_mapping style struture of what's currently
# on the load balancer
- for subnet in self.elb['AvailabilityZones']:
- this_mapping = {'SubnetId': subnet['SubnetId']}
- for address in subnet.get('LoadBalancerAddresses', []):
- if 'AllocationId' in address:
- this_mapping['AllocationId'] = address['AllocationId']
+ for subnet in self.elb["AvailabilityZones"]:
+ this_mapping = {"SubnetId": subnet["SubnetId"]}
+ for address in subnet.get("LoadBalancerAddresses", []):
+ if "AllocationId" in address:
+ this_mapping["AllocationId"] = address["AllocationId"]
break
subnet_mapping_id_list.append(this_mapping)
- return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
+ return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(
+ frozenset(mapping.items()) for mapping in subnet_mappings
+ )
def modify_subnets(self):
"""
@@ -352,9 +358,9 @@ class ElasticLoadBalancerV2(object):
"""
try:
- AWSRetry.jittered_backoff()(
- self.connection.set_subnets
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
+ AWSRetry.jittered_backoff()(self.connection.set_subnets)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"], Subnets=self.subnets
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -367,7 +373,7 @@ class ElasticLoadBalancerV2(object):
"""
self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
- self.elb['tags'] = self.get_elb_tags()
+ self.elb["tags"] = self.get_elb_tags()
def modify_ip_address_type(self, ip_addr_type):
"""
@@ -380,30 +386,30 @@ class ElasticLoadBalancerV2(object):
return
try:
- AWSRetry.jittered_backoff()(
- self.connection.set_ip_address_type
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type)
+ AWSRetry.jittered_backoff()(self.connection.set_ip_address_type)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"], IpAddressType=ip_addr_type
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
- self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type)
+ self.wait_for_ip_type(self.elb["LoadBalancerArn"], ip_addr_type)
def _elb_create_params(self):
# Required parameters
params = dict()
- params['Name'] = self.name
- params['Type'] = self.type
+ params["Name"] = self.name
+ params["Type"] = self.type
# Other parameters
if self.elb_ip_addr_type is not None:
- params['IpAddressType'] = self.elb_ip_addr_type
+ params["IpAddressType"] = self.elb_ip_addr_type
if self.subnets is not None:
- params['Subnets'] = self.subnets
+ params["Subnets"] = self.subnets
if self.subnet_mappings is not None:
- params['SubnetMappings'] = self.subnet_mappings
+ params["SubnetMappings"] = self.subnet_mappings
if self.tags:
- params['Tags'] = self.tags
+ params["Tags"] = self.tags
# Scheme isn't supported for GatewayLBs, so we won't add it here, even though we don't
# support them yet.
@@ -418,40 +424,39 @@ class ElasticLoadBalancerV2(object):
params = self._elb_create_params()
try:
- self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
+ self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)["LoadBalancers"][0]
self.changed = True
self.new_load_balancer = True
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
- self.wait_for_status(self.elb['LoadBalancerArn'])
+ self.wait_for_status(self.elb["LoadBalancerArn"])
class ApplicationLoadBalancer(ElasticLoadBalancerV2):
-
def __init__(self, connection, connection_ec2, module):
"""
:param connection: boto3 connection
:param module: Ansible module
"""
- super(ApplicationLoadBalancer, self).__init__(connection, module)
+ super().__init__(connection, module)
self.connection_ec2 = connection_ec2
# Ansible module parameters specific to ALBs
- self.type = 'application'
- if module.params.get('security_groups') is not None:
+ self.type = "application"
+ if module.params.get("security_groups") is not None:
try:
- self.security_groups = AWSRetry.jittered_backoff()(
- get_ec2_security_group_ids_from_names
- )(module.params.get('security_groups'), self.connection_ec2, boto3=True)
+ self.security_groups = AWSRetry.jittered_backoff()(get_ec2_security_group_ids_from_names)(
+ module.params.get("security_groups"), self.connection_ec2, boto3=True
+ )
except ValueError as e:
self.module.fail_json(msg=str(e), exception=traceback.format_exc())
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
else:
- self.security_groups = module.params.get('security_groups')
+ self.security_groups = module.params.get("security_groups")
self.access_logs_enabled = module.params.get("access_logs_enabled")
self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
@@ -463,15 +468,17 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
self.http_xff_client_port = module.params.get("http_xff_client_port")
self.waf_fail_open = module.params.get("waf_fail_open")
- if self.elb is not None and self.elb['Type'] != 'application':
- self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")
+ if self.elb is not None and self.elb["Type"] != "application":
+ self.module.fail_json(
+ msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.",
+ )
def _elb_create_params(self):
params = super()._elb_create_params()
if self.security_groups is not None:
- params['SecurityGroups'] = self.security_groups
- params['Scheme'] = self.scheme
+ params["SecurityGroups"] = self.security_groups
+ params["Scheme"] = self.scheme
return params
@@ -482,34 +489,77 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
"""
update_attributes = []
- if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
- update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
- if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
- update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
- if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
- update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
- if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
- update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
- if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
- update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
- if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
- update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
- if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \
- self.elb_attributes['routing_http_desync_mitigation_mode']:
- update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()})
- if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \
- self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']:
- update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()})
- if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \
- self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']:
- update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled',
- 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()})
- if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \
- self.elb_attributes['routing_http_xff_client_port_enabled']:
- update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()})
- if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \
- self.elb_attributes['waf_fail_open_enabled']:
- update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()})
+ if (
+ self.access_logs_enabled is not None
+ and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.enabled", "Value": str(self.access_logs_enabled).lower()})
+ if (
+ self.access_logs_s3_bucket is not None
+ and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket})
+ if (
+ self.access_logs_s3_prefix is not None
+ and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix})
+ if (
+ self.deletion_protection is not None
+ and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()}
+ )
+ if (
+ self.idle_timeout is not None
+ and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"]
+ ):
+ update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)})
+ if self.http2 is not None and str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]:
+ update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()})
+ if (
+ self.http_desync_mitigation_mode is not None
+ and str(self.http_desync_mitigation_mode).lower()
+ != self.elb_attributes["routing_http_desync_mitigation_mode"]
+ ):
+ update_attributes.append(
+ {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()}
+ )
+ if (
+ self.http_drop_invalid_header_fields is not None
+ and str(self.http_drop_invalid_header_fields).lower()
+ != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"]
+ ):
+ update_attributes.append(
+ {
+ "Key": "routing.http.drop_invalid_header_fields.enabled",
+ "Value": str(self.http_drop_invalid_header_fields).lower(),
+ }
+ )
+ if (
+ self.http_x_amzn_tls_version_and_cipher_suite is not None
+ and str(self.http_x_amzn_tls_version_and_cipher_suite).lower()
+ != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"]
+ ):
+ update_attributes.append(
+ {
+ "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled",
+ "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(),
+ }
+ )
+ if (
+ self.http_xff_client_port is not None
+ and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()}
+ )
+ if (
+ self.waf_fail_open is not None
+ and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"]
+ ):
+ update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()})
if update_attributes:
return False
@@ -525,45 +575,90 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
update_attributes = []
- if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
- update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
- if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
- update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
- if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
- update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
- if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
- update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
- if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
- update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
- if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
- update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
- if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \
- self.elb_attributes['routing_http_desync_mitigation_mode']:
- update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()})
- if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \
- self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']:
- update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()})
- if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \
- self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']:
- update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled',
- 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()})
- if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \
- self.elb_attributes['routing_http_xff_client_port_enabled']:
- update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()})
- if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \
- self.elb_attributes['waf_fail_open_enabled']:
- update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()})
+ if (
+ self.access_logs_enabled is not None
+ and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.enabled", "Value": str(self.access_logs_enabled).lower()})
+ if (
+ self.access_logs_s3_bucket is not None
+ and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket})
+ if (
+ self.access_logs_s3_prefix is not None
+ and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"]
+ ):
+ update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix})
+ if (
+ self.deletion_protection is not None
+ and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()}
+ )
+ if (
+ self.idle_timeout is not None
+ and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"]
+ ):
+ update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)})
+ if self.http2 is not None and str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]:
+ update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()})
+ if (
+ self.http_desync_mitigation_mode is not None
+ and str(self.http_desync_mitigation_mode).lower()
+ != self.elb_attributes["routing_http_desync_mitigation_mode"]
+ ):
+ update_attributes.append(
+ {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()}
+ )
+ if (
+ self.http_drop_invalid_header_fields is not None
+ and str(self.http_drop_invalid_header_fields).lower()
+ != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"]
+ ):
+ update_attributes.append(
+ {
+ "Key": "routing.http.drop_invalid_header_fields.enabled",
+ "Value": str(self.http_drop_invalid_header_fields).lower(),
+ }
+ )
+ if (
+ self.http_x_amzn_tls_version_and_cipher_suite is not None
+ and str(self.http_x_amzn_tls_version_and_cipher_suite).lower()
+ != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"]
+ ):
+ update_attributes.append(
+ {
+ "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled",
+ "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(),
+ }
+ )
+ if (
+ self.http_xff_client_port is not None
+ and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()}
+ )
+ if (
+ self.waf_fail_open is not None
+ and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"]
+ ):
+ update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()})
if update_attributes:
try:
- AWSRetry.jittered_backoff()(
- self.connection.modify_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes
+ )
self.changed = True
except (BotoCoreError, ClientError) as e:
# Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
if self.new_load_balancer:
- AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"]
+ )
self.module.fail_json_aws(e)
def compare_security_groups(self):
@@ -573,7 +668,7 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
:return: bool True if they match otherwise False
"""
- if set(self.elb['SecurityGroups']) != set(self.security_groups):
+ if set(self.elb["SecurityGroups"]) != set(self.security_groups):
return False
else:
return True
@@ -585,9 +680,9 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
"""
try:
- AWSRetry.jittered_backoff()(
- self.connection.set_security_groups
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
+ AWSRetry.jittered_backoff()(self.connection.set_security_groups)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"], SecurityGroups=self.security_groups
+ )
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -595,29 +690,29 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
class NetworkLoadBalancer(ElasticLoadBalancerV2):
-
def __init__(self, connection, connection_ec2, module):
-
"""
:param connection: boto3 connection
:param module: Ansible module
"""
- super(NetworkLoadBalancer, self).__init__(connection, module)
+ super().__init__(connection, module)
self.connection_ec2 = connection_ec2
# Ansible module parameters specific to NLBs
- self.type = 'network'
- self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')
+ self.type = "network"
+ self.cross_zone_load_balancing = module.params.get("cross_zone_load_balancing")
- if self.elb is not None and self.elb['Type'] != 'network':
- self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")
+ if self.elb is not None and self.elb["Type"] != "network":
+ self.module.fail_json(
+ msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.",
+ )
def _elb_create_params(self):
params = super()._elb_create_params()
- params['Scheme'] = self.scheme
+ params["Scheme"] = self.scheme
return params
@@ -630,22 +725,33 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2):
update_attributes = []
- if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
- self.elb_attributes['load_balancing_cross_zone_enabled']:
- update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()})
- if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
- update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
+ if (
+ self.cross_zone_load_balancing is not None
+ and str(self.cross_zone_load_balancing).lower() != self.elb_attributes["load_balancing_cross_zone_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "load_balancing.cross_zone.enabled", "Value": str(self.cross_zone_load_balancing).lower()}
+ )
+ if (
+ self.deletion_protection is not None
+ and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"]
+ ):
+ update_attributes.append(
+ {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()}
+ )
if update_attributes:
try:
- AWSRetry.jittered_backoff()(
- self.connection.modify_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
+ AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes
+ )
self.changed = True
except (BotoCoreError, ClientError) as e:
# Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
if self.new_load_balancer:
- AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
+ AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(
+ LoadBalancerArn=self.elb["LoadBalancerArn"]
+ )
self.module.fail_json_aws(e)
def modify_subnets(self):
@@ -654,20 +760,21 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2):
:return:
"""
- self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
+ self.module.fail_json(msg="Modifying subnets and elastic IPs is not supported for Network Load Balancer")
-class ELBListeners(object):
-
+class ELBListeners:
def __init__(self, connection, module, elb_arn):
-
self.connection = connection
self.module = module
self.elb_arn = elb_arn
listeners = module.params.get("listeners")
if listeners is not None:
# Remove suboption argspec defaults of None from each listener
- listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
+ listeners = [
+ dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None)
+ for listener_dict in listeners
+ ]
self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
self.current_listeners = self._get_elb_listeners()
self.purge_listeners = module.params.get("purge_listeners")
@@ -689,8 +796,12 @@ class ELBListeners(object):
"""
try:
- listener_paginator = self.connection.get_paginator('describe_listeners')
- return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
+ listener_paginator = self.connection.get_paginator("describe_listeners")
+ return (
+ AWSRetry.jittered_backoff()(listener_paginator.paginate)(
+ LoadBalancerArn=self.elb_arn
+ ).build_full_result()
+ )["Listeners"]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -709,14 +820,14 @@ class ELBListeners(object):
fixed_listeners = []
for listener in listeners:
fixed_actions = []
- for action in listener['DefaultActions']:
- if 'TargetGroupName' in action:
- action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
- self.module,
- action['TargetGroupName'])
- del action['TargetGroupName']
+ for action in listener["DefaultActions"]:
+ if "TargetGroupName" in action:
+ action["TargetGroupArn"] = convert_tg_name_to_arn(
+ self.connection, self.module, action["TargetGroupName"]
+ )
+ del action["TargetGroupName"]
fixed_actions.append(action)
- listener['DefaultActions'] = fixed_actions
+ listener["DefaultActions"] = fixed_actions
fixed_listeners.append(listener)
return fixed_listeners
@@ -734,21 +845,21 @@ class ELBListeners(object):
for current_listener in self.current_listeners:
current_listener_passed_to_module = False
for new_listener in self.listeners[:]:
- new_listener['Port'] = int(new_listener['Port'])
- if current_listener['Port'] == new_listener['Port']:
+ new_listener["Port"] = int(new_listener["Port"])
+ if current_listener["Port"] == new_listener["Port"]:
current_listener_passed_to_module = True
# Remove what we match so that what is left can be marked as 'to be added'
listeners_to_add.remove(new_listener)
modified_listener = self._compare_listener(current_listener, new_listener)
if modified_listener:
- modified_listener['Port'] = current_listener['Port']
- modified_listener['ListenerArn'] = current_listener['ListenerArn']
+ modified_listener["Port"] = current_listener["Port"]
+ modified_listener["ListenerArn"] = current_listener["ListenerArn"]
listeners_to_modify.append(modified_listener)
break
# If the current listener was not matched against passed listeners and purge is True, mark for removal
if not current_listener_passed_to_module and self.purge_listeners:
- listeners_to_delete.append(current_listener['ListenerArn'])
+ listeners_to_delete.append(current_listener["ListenerArn"])
return listeners_to_add, listeners_to_modify, listeners_to_delete
@@ -764,43 +875,50 @@ class ELBListeners(object):
modified_listener = {}
# Port
- if current_listener['Port'] != new_listener['Port']:
- modified_listener['Port'] = new_listener['Port']
+ if current_listener["Port"] != new_listener["Port"]:
+ modified_listener["Port"] = new_listener["Port"]
# Protocol
- if current_listener['Protocol'] != new_listener['Protocol']:
- modified_listener['Protocol'] = new_listener['Protocol']
+ if current_listener["Protocol"] != new_listener["Protocol"]:
+ modified_listener["Protocol"] = new_listener["Protocol"]
# If Protocol is HTTPS, check additional attributes
- if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
+ if current_listener["Protocol"] == "HTTPS" and new_listener["Protocol"] == "HTTPS":
# Cert
- if current_listener['SslPolicy'] != new_listener['SslPolicy']:
- modified_listener['SslPolicy'] = new_listener['SslPolicy']
- if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
- modified_listener['Certificates'] = []
- modified_listener['Certificates'].append({})
- modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
- elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
- modified_listener['SslPolicy'] = new_listener['SslPolicy']
- modified_listener['Certificates'] = []
- modified_listener['Certificates'].append({})
- modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
+ if current_listener["SslPolicy"] != new_listener["SslPolicy"]:
+ modified_listener["SslPolicy"] = new_listener["SslPolicy"]
+ if (
+ current_listener["Certificates"][0]["CertificateArn"]
+ != new_listener["Certificates"][0]["CertificateArn"]
+ ):
+ modified_listener["Certificates"] = []
+ modified_listener["Certificates"].append({})
+ modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0][
+ "CertificateArn"
+ ]
+ elif current_listener["Protocol"] != "HTTPS" and new_listener["Protocol"] == "HTTPS":
+ modified_listener["SslPolicy"] = new_listener["SslPolicy"]
+ modified_listener["Certificates"] = []
+ modified_listener["Certificates"].append({})
+ modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0]["CertificateArn"]
# Default action
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
- if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
- current_actions_sorted = _sort_actions(current_listener['DefaultActions'])
- new_actions_sorted = _sort_actions(new_listener['DefaultActions'])
+ if len(current_listener["DefaultActions"]) == len(new_listener["DefaultActions"]):
+ current_actions_sorted = _sort_actions(current_listener["DefaultActions"])
+ new_actions_sorted = _sort_actions(new_listener["DefaultActions"])
new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
- if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
- modified_listener['DefaultActions'] = new_listener['DefaultActions']
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [
+ _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
+ ]:
+ modified_listener["DefaultActions"] = new_listener["DefaultActions"]
# If the action lengths are different, then replace with the new actions
else:
- modified_listener['DefaultActions'] = new_listener['DefaultActions']
+ modified_listener["DefaultActions"] = new_listener["DefaultActions"]
if modified_listener:
return modified_listener
@@ -808,8 +926,7 @@ class ELBListeners(object):
return None
-class ELBListener(object):
-
+class ELBListener:
def __init__(self, connection, module, listener, elb_arn):
"""
@@ -825,37 +942,32 @@ class ELBListener(object):
self.elb_arn = elb_arn
def add(self):
-
try:
# Rules is not a valid parameter for create_listener
- if 'Rules' in self.listener:
- self.listener.pop('Rules')
+ if "Rules" in self.listener:
+ self.listener.pop("Rules")
AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def modify(self):
-
try:
# Rules is not a valid parameter for modify_listener
- if 'Rules' in self.listener:
- self.listener.pop('Rules')
+ if "Rules" in self.listener:
+ self.listener.pop("Rules")
AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def delete(self):
-
try:
AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
-class ELBListenerRules(object):
-
+class ELBListenerRules:
def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
-
self.connection = connection
self.module = module
self.elb_arn = elb_arn
@@ -864,13 +976,10 @@ class ELBListenerRules(object):
# Get listener based on port so we can use ARN
self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
- self.listener_arn = self.current_listener['ListenerArn']
- self.rules_to_add = deepcopy(self.rules)
- self.rules_to_modify = []
- self.rules_to_delete = []
+ self.listener_arn = self.current_listener.get("ListenerArn")
# If the listener exists (i.e. has an ARN) get rules for the listener
- if 'ListenerArn' in self.current_listener:
+ if "ListenerArn" in self.current_listener:
self.current_rules = self._get_elb_listener_rules()
else:
self.current_rules = []
@@ -887,20 +996,23 @@ class ELBListenerRules(object):
fixed_rules = []
for rule in rules:
fixed_actions = []
- for action in rule['Actions']:
- if 'TargetGroupName' in action:
- action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
- del action['TargetGroupName']
+ for action in rule["Actions"]:
+ if "TargetGroupName" in action:
+ action["TargetGroupArn"] = convert_tg_name_to_arn(
+ self.connection, self.module, action["TargetGroupName"]
+ )
+ del action["TargetGroupName"]
fixed_actions.append(action)
- rule['Actions'] = fixed_actions
+ rule["Actions"] = fixed_actions
fixed_rules.append(rule)
return fixed_rules
def _get_elb_listener_rules(self):
-
try:
- return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
+ return AWSRetry.jittered_backoff()(self.connection.describe_rules)(
+ ListenerArn=self.current_listener["ListenerArn"]
+ )["Rules"]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -918,44 +1030,56 @@ class ELBListenerRules(object):
# host-header: current_condition includes both HostHeaderConfig AND Values while
# condition can be defined with either HostHeaderConfig OR Values. Only use
# HostHeaderConfig['Values'] comparison if both conditions includes HostHeaderConfig.
- if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'):
- if (current_condition['Field'] == condition['Field'] and
- sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])):
+ if current_condition.get("HostHeaderConfig") and condition.get("HostHeaderConfig"):
+ if current_condition["Field"] == condition["Field"] and sorted(
+ current_condition["HostHeaderConfig"]["Values"]
+ ) == sorted(condition["HostHeaderConfig"]["Values"]):
condition_found = True
break
- elif current_condition.get('HttpHeaderConfig'):
- if (current_condition['Field'] == condition['Field'] and
- sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and
- current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']):
+ elif current_condition.get("HttpHeaderConfig"):
+ if (
+ current_condition["Field"] == condition["Field"]
+ and sorted(current_condition["HttpHeaderConfig"]["Values"])
+ == sorted(condition["HttpHeaderConfig"]["Values"])
+ and current_condition["HttpHeaderConfig"]["HttpHeaderName"]
+ == condition["HttpHeaderConfig"]["HttpHeaderName"]
+ ):
condition_found = True
break
- elif current_condition.get('HttpRequestMethodConfig'):
- if (current_condition['Field'] == condition['Field'] and
- sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])):
+ elif current_condition.get("HttpRequestMethodConfig"):
+ if current_condition["Field"] == condition["Field"] and sorted(
+ current_condition["HttpRequestMethodConfig"]["Values"]
+ ) == sorted(condition["HttpRequestMethodConfig"]["Values"]):
condition_found = True
break
# path-pattern: current_condition includes both PathPatternConfig AND Values while
# condition can be defined with either PathPatternConfig OR Values. Only use
# PathPatternConfig['Values'] comparison if both conditions includes PathPatternConfig.
- elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'):
- if (current_condition['Field'] == condition['Field'] and
- sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])):
+ elif current_condition.get("PathPatternConfig") and condition.get("PathPatternConfig"):
+ if current_condition["Field"] == condition["Field"] and sorted(
+ current_condition["PathPatternConfig"]["Values"]
+ ) == sorted(condition["PathPatternConfig"]["Values"]):
condition_found = True
break
- elif current_condition.get('QueryStringConfig'):
+ elif current_condition.get("QueryStringConfig"):
# QueryString Values is not sorted as it is the only list of dicts (not strings).
- if (current_condition['Field'] == condition['Field'] and
- current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']):
+ if (
+ current_condition["Field"] == condition["Field"]
+ and current_condition["QueryStringConfig"]["Values"] == condition["QueryStringConfig"]["Values"]
+ ):
condition_found = True
break
- elif current_condition.get('SourceIpConfig'):
- if (current_condition['Field'] == condition['Field'] and
- sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])):
+ elif current_condition.get("SourceIpConfig"):
+ if current_condition["Field"] == condition["Field"] and sorted(
+ current_condition["SourceIpConfig"]["Values"]
+ ) == sorted(condition["SourceIpConfig"]["Values"]):
condition_found = True
break
# Not all fields are required to have Values list nested within a *Config dict
# e.g. fields host-header/path-pattern can directly list Values
- elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
+ elif current_condition["Field"] == condition["Field"] and sorted(current_condition["Values"]) == sorted(
+ condition["Values"]
+ ):
condition_found = True
break
@@ -970,36 +1094,39 @@ class ELBListenerRules(object):
modified_rule = {}
# Priority
- if int(current_rule['Priority']) != int(new_rule['Priority']):
- modified_rule['Priority'] = new_rule['Priority']
+ if int(current_rule["Priority"]) != int(new_rule["Priority"]):
+ modified_rule["Priority"] = new_rule["Priority"]
# Actions
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
- if len(current_rule['Actions']) == len(new_rule['Actions']):
+ if len(current_rule["Actions"]) == len(new_rule["Actions"]):
# if actions have just one element, compare the contents and then update if
# they're different
- current_actions_sorted = _sort_actions(current_rule['Actions'])
- new_actions_sorted = _sort_actions(new_rule['Actions'])
+ copy_new_rule = deepcopy(new_rule)
+ current_actions_sorted = _sort_actions(current_rule["Actions"])
+ new_actions_sorted = _sort_actions(copy_new_rule["Actions"])
new_current_actions_sorted = [_append_use_existing_client_secretn(i) for i in current_actions_sorted]
new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
- if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
- modified_rule['Actions'] = new_rule['Actions']
+ if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [
+ _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
+ ]:
+ modified_rule["Actions"] = new_rule["Actions"]
# If the action lengths are different, then replace with the new actions
else:
- modified_rule['Actions'] = new_rule['Actions']
+ modified_rule["Actions"] = new_rule["Actions"]
# Conditions
modified_conditions = []
- for condition in new_rule['Conditions']:
- if not self._compare_condition(current_rule['Conditions'], condition):
+ for condition in new_rule["Conditions"]:
+ if not self._compare_condition(current_rule["Conditions"], condition):
modified_conditions.append(condition)
if modified_conditions:
- modified_rule['Conditions'] = modified_conditions
+ modified_rule["Conditions"] = modified_conditions
return modified_rule
@@ -1012,34 +1139,73 @@ class ELBListenerRules(object):
rules_to_modify = []
rules_to_delete = []
rules_to_add = deepcopy(self.rules)
+ rules_to_set_priority = []
+
+ # List rules to update priority, 'Actions' and 'Conditions' remain the same
+ # only the 'Priority' has changed
+ current_rules = deepcopy(self.current_rules)
+ remaining_rules = []
+ while current_rules:
+ current_rule = current_rules.pop(0)
+ # Skip the default rule, this one can't be modified
+ if current_rule.get("IsDefault", False):
+ continue
+ to_keep = True
+ for new_rule in rules_to_add:
+ modified_rule = self._compare_rule(current_rule, new_rule)
+ if not modified_rule:
+ # The current rule has been passed with the same properties to the module
+ # Remove it for later comparison
+ rules_to_add.remove(new_rule)
+ to_keep = False
+ break
+ if modified_rule and list(modified_rule.keys()) == ["Priority"]:
+ # if only the Priority has changed
+ modified_rule["Priority"] = int(new_rule["Priority"])
+ modified_rule["RuleArn"] = current_rule["RuleArn"]
+
+ rules_to_set_priority.append(modified_rule)
+ to_keep = False
+ rules_to_add.remove(new_rule)
+ break
+ if to_keep:
+ remaining_rules.append(current_rule)
- for current_rule in self.current_rules:
+ for current_rule in remaining_rules:
current_rule_passed_to_module = False
- for new_rule in self.rules[:]:
- if current_rule['Priority'] == str(new_rule['Priority']):
+ for new_rule in rules_to_add:
+ if current_rule["Priority"] == str(new_rule["Priority"]):
current_rule_passed_to_module = True
# Remove what we match so that what is left can be marked as 'to be added'
rules_to_add.remove(new_rule)
modified_rule = self._compare_rule(current_rule, new_rule)
if modified_rule:
- modified_rule['Priority'] = int(current_rule['Priority'])
- modified_rule['RuleArn'] = current_rule['RuleArn']
- modified_rule['Actions'] = new_rule['Actions']
- modified_rule['Conditions'] = new_rule['Conditions']
+ modified_rule["Priority"] = int(current_rule["Priority"])
+ modified_rule["RuleArn"] = current_rule["RuleArn"]
+ modified_rule["Actions"] = new_rule["Actions"]
+ modified_rule["Conditions"] = new_rule["Conditions"]
+ # You cannot both specify a client secret and set UseExistingClientSecret to true
+ for action in modified_rule.get("Actions", []):
+ if action.get("AuthenticateOidcConfig", {}).get("ClientSecret", False):
+ action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False
rules_to_modify.append(modified_rule)
break
# If the current rule was not matched against passed rules, mark for removal
- if not current_rule_passed_to_module and not current_rule['IsDefault']:
- rules_to_delete.append(current_rule['RuleArn'])
+ if not current_rule_passed_to_module and not current_rule.get("IsDefault", False):
+ rules_to_delete.append(current_rule["RuleArn"])
- return rules_to_add, rules_to_modify, rules_to_delete
+ # For rules to create 'UseExistingClientSecret' should be set to False
+ for rule in rules_to_add:
+ for action in rule.get("Actions", []):
+ if action.get("AuthenticateOidcConfig", {}).get("UseExistingClientSecret", False):
+ action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False
+ return rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority
-class ELBListenerRule(object):
+class ELBListenerRule:
def __init__(self, connection, module, rule, listener_arn):
-
self.connection = connection
self.module = module
self.rule = rule
@@ -1054,8 +1220,8 @@ class ELBListenerRule(object):
"""
try:
- self.rule['ListenerArn'] = self.listener_arn
- self.rule['Priority'] = int(self.rule['Priority'])
+ self.rule["ListenerArn"] = self.listener_arn
+ self.rule["Priority"] = int(self.rule["Priority"])
AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -1070,7 +1236,7 @@ class ELBListenerRule(object):
"""
try:
- del self.rule['Priority']
+ del self.rule["Priority"]
AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
@@ -1085,7 +1251,25 @@ class ELBListenerRule(object):
"""
try:
- AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
+ AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule["RuleArn"])
+ except (BotoCoreError, ClientError) as e:
+ self.module.fail_json_aws(e)
+
+ self.changed = True
+
+ def set_rule_priorities(self):
+ """
+ Sets the priorities of the specified rules.
+
+ :return:
+ """
+
+ try:
+ rules = [self.rule]
+ if isinstance(self.rule, list):
+ rules = self.rule
+ rule_priorities = [{"RuleArn": rule["RuleArn"], "Priority": rule["Priority"]} for rule in rules]
+ AWSRetry.jittered_backoff()(self.connection.set_rule_priorities)(RulePriorities=rule_priorities)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/errors.py b/ansible_collections/amazon/aws/plugins/module_utils/errors.py
new file mode 100644
index 000000000..38e9b3800
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/errors.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import functools
+
+try:
+ import botocore
+except ImportError:
+ pass # Modules are responsible for handling this.
+
+from .exceptions import AnsibleAWSError
+
+
+class AWSErrorHandler:
+
+ """_CUSTOM_EXCEPTION can be overridden by subclasses to customize the exception raised"""
+
+ _CUSTOM_EXCEPTION = AnsibleAWSError
+
+ @classmethod
+ def _is_missing(cls):
+ """Should be overridden with a class method that returns the value from is_boto3_error_code (or similar)"""
+ return type("NeverEverRaisedException", (Exception,), {})
+
+ @classmethod
+ def common_error_handler(cls, description):
+ """A simple error handler that catches the standard Boto3 exceptions and raises
+ an AnsibleAWSError exception.
+
+ param: description: a description of the action being taken.
+ Exception raised will include a message of
+ f"Timeout trying to {description}" or
+ f"Failed to {description}"
+ """
+
+ def wrapper(func):
+ @functools.wraps(func)
+ def handler(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except botocore.exceptions.WaiterError as e:
+ raise cls._CUSTOM_EXCEPTION(message=f"Timeout trying to {description}", exception=e) from e
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise cls._CUSTOM_EXCEPTION(message=f"Failed to {description}", exception=e) from e
+
+ return handler
+
+ return wrapper
+
+ @classmethod
+ def list_error_handler(cls, description, default_value=None):
+ """A simple error handler that catches the standard Boto3 exceptions and raises
+ an AnsibleAWSError exception.
+ Error codes representing a non-existent entity will result in None being returned
+ Generally used for Get/List calls where the exception just means the resource isn't there
+
+ param: description: a description of the action being taken.
+ Exception raised will include a message of
+ f"Timeout trying to {description}" or
+ f"Failed to {description}"
+ param: default_value: the value to return if no matching
+ resources are returned. Defaults to None
+ """
+
+ def wrapper(func):
+ @functools.wraps(func)
+ @cls.common_error_handler(description)
+ def handler(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except cls._is_missing():
+ return default_value
+
+ return handler
+
+ return wrapper
+
+ @classmethod
+ def deletion_error_handler(cls, description):
+ """A simple error handler that catches the standard Boto3 exceptions and raises
+ an AnsibleAWSError exception.
+ Error codes representing a non-existent entity will result in None being returned
+ Generally used in deletion calls where NoSuchEntity means it's already gone
+
+ param: description: a description of the action being taken.
+ Exception raised will include a message of
+ f"Timeout trying to {description}" or
+ f"Failed to {description}"
+ """
+
+ def wrapper(func):
+ @functools.wraps(func)
+ @cls.common_error_handler(description)
+ def handler(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except cls._is_missing():
+ return False
+
+ return handler
+
+ return wrapper
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py
new file mode 100644
index 000000000..893a62db9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2022 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.module_utils._text import to_native
+
+
+class AnsibleAWSError(Exception):
+ def __str__(self):
+ if self.exception and self.message:
+ return f"{self.message}: {to_native(self.exception)}"
+
+ return super().__str__()
+
+ def __init__(self, message=None, exception=None, **kwargs):
+ if not message and not exception:
+ super().__init__()
+ elif not message:
+ super().__init__(exception)
+ else:
+ super().__init__(message)
+
+ self.exception = exception
+ self.message = message
+
+ # In places where passing more information to module.fail_json would be helpful
+ # store the extra info. Other plugin types have to raise the correct exception
+ # such as AnsibleLookupError, so can't easily consume this.
+ self.kwargs = kwargs or {}
+
+
+class AnsibleBotocoreError(AnsibleAWSError):
+ pass
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
index 6ebed23ba..430823f3b 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/iam.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -1,24 +1,280 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import re
+from copy import deepcopy
try:
import botocore
except ImportError:
- pass
+ pass # Modules are responsible for handling this.
from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .arn import parse_aws_arn
+from .arn import validate_aws_arn
+from .botocore import is_boto3_error_code
+from .botocore import normalize_boto3_result
+from .errors import AWSErrorHandler
+from .exceptions import AnsibleAWSError
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
+
+
+class AnsibleIAMError(AnsibleAWSError):
+ pass
+
+
+class IAMErrorHandler(AWSErrorHandler):
+ _CUSTOM_EXCEPTION = AnsibleIAMError
+
+ @classmethod
+ def _is_missing(cls):
+ return is_boto3_error_code("NoSuchEntity")
+
+
+@IAMErrorHandler.deletion_error_handler("detach group policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_group_policy(client, arn, group):
+ client.detach_group_policy(PolicyArn=arn, GroupName=group)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach role policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_role_policy(client, arn, role):
+ client.detach_group_policy(PolicyArn=arn, RoleName=role)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach user policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_user_policy(client, arn, user):
+ client.detach_group_policy(PolicyArn=arn, UserName=user)
+ return True
+
+
+@AWSRetry.jittered_backoff()
+def _get_iam_instance_profiles(client, **kwargs):
+ return client.get_instance_profile(**kwargs)["InstanceProfile"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles(client, **kwargs):
+ paginator = client.get_paginator("list_instance_profiles")
+ return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles_for_role(client, **kwargs):
+ paginator = client.get_paginator("list_instance_profiles_for_role")
+ return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@IAMErrorHandler.list_error_handler("list policies for role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_policies(client, role_name):
+ paginator = client.get_paginator("list_role_policies")
+ return paginator.paginate(RoleName=role_name).build_full_result()["PolicyNames"]
+
+
+@IAMErrorHandler.list_error_handler("list policies attached to role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_attached_policies(client, role_name):
+ paginator = client.get_paginator("list_attached_role_policies")
+ return paginator.paginate(RoleName=role_name).build_full_result()["AttachedPolicies"]
+
+
+@IAMErrorHandler.list_error_handler("list users", [])
+@AWSRetry.jittered_backoff()
+def list_iam_users(client, path=None):
+ args = {}
+ if path is None:
+ args = {"PathPrefix": path}
+ paginator = client.get_paginator("list_users")
+ return paginator.paginate(**args).build_full_result()["Users"]
+
+
+@IAMErrorHandler.common_error_handler("list all managed policies")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policies(client, **kwargs):
+ paginator = client.get_paginator("list_policies")
+ return paginator.paginate(**kwargs).build_full_result()["Policies"]
+
+
+list_managed_policies = list_iam_managed_policies
+
+
+@IAMErrorHandler.list_error_handler("list entities for policy", [])
+@AWSRetry.jittered_backoff()
+def list_iam_entities_for_policy(client, arn):
+ paginator = client.get_paginator("list_entities_for_policy")
+ return paginator.paginate(PolicyArn=arn).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("list roles", [])
+@AWSRetry.jittered_backoff()
+def list_iam_roles(client, path=None):
+ args = {}
+ if path:
+ args["PathPrefix"] = path
+ paginator = client.get_paginator("list_roles")
+ return paginator.paginate(**args).build_full_result()["Roles"]
+
+
+@IAMErrorHandler.list_error_handler("list mfa devices", [])
+@AWSRetry.jittered_backoff()
+def list_iam_mfa_devices(client, user=None):
+ args = {}
+ if user:
+ args["UserName"] = user
+ paginator = client.get_paginator("list_mfa_devices")
+ return paginator.paginate(**args).build_full_result()["MFADevices"]
+
+
+@IAMErrorHandler.list_error_handler("get role")
+@AWSRetry.jittered_backoff()
+def get_iam_role(client, name):
+ return client.get_role(RoleName=name)["Role"]
+
+
+@IAMErrorHandler.list_error_handler("get group")
+@AWSRetry.jittered_backoff()
+def get_iam_group(client, name):
+ paginator = client.get_paginator("get_group")
+ return paginator.paginate(GroupName=name).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("get access keys for user", [])
+@AWSRetry.jittered_backoff()
+def get_iam_access_keys(client, user):
+ results = client.list_access_keys(UserName=user)
+ return normalize_iam_access_keys(results.get("AccessKeyMetadata", []))
+
+
+@IAMErrorHandler.list_error_handler("get user")
+@AWSRetry.jittered_backoff()
+def get_iam_user(client, user):
+ results = client.get_user(UserName=user)
+ return normalize_iam_user(results.get("User", []))
+
+
+def find_iam_managed_policy_by_name(client, name):
+ policies = list_iam_managed_policies(client)
+ for policy in policies:
+ if policy["PolicyName"] == name:
+ return policy
+ return None
+
+
+def get_iam_managed_policy_by_name(client, name):
+ # get_policy() requires an ARN, and list_policies() doesn't return all fields, so we need to do both :(
+ policy = find_iam_managed_policy_by_name(client, name)
+ if policy is None:
+ return None
+ return get_iam_managed_policy_by_arn(client, policy["Arn"])
+
+
+@IAMErrorHandler.common_error_handler("get policy")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_by_arn(client, arn):
+ policy = client.get_policy(PolicyArn=arn)["Policy"]
+ return policy
+
+
+@IAMErrorHandler.common_error_handler("list policy versions")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policy_versions(client, arn):
+ return client.list_policy_versions(PolicyArn=arn)["Versions"]
+
+
+@IAMErrorHandler.common_error_handler("get policy version")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_version(client, arn, version):
+ return client.get_policy_version(PolicyArn=arn, VersionId=version)["PolicyVersion"]
+
+
+def normalize_iam_mfa_device(device):
+ """Converts IAM MFA Device from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not device:
+ return device
+ camel_device = camel_dict_to_snake_dict(device)
+ camel_device["tags"] = boto3_tag_list_to_ansible_dict(device.pop("Tags", []))
+ return camel_device
-from .ec2 import AWSRetry
-from .core import is_boto3_error_code
-from .core import parse_aws_arn
+
+def normalize_iam_mfa_devices(devices):
+ """Converts a list of IAM MFA Devices from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not devices:
+ return []
+ devices = [normalize_iam_mfa_device(d) for d in devices]
+ return devices
+
+
+def normalize_iam_user(user):
+ """Converts IAM users from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not user:
+ return user
+ camel_user = camel_dict_to_snake_dict(user)
+ camel_user["tags"] = boto3_tag_list_to_ansible_dict(user.pop("Tags", []))
+ return camel_user
+
+
+def normalize_iam_policy(policy):
+ """Converts IAM policies from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not policy:
+ return policy
+ camel_policy = camel_dict_to_snake_dict(policy)
+ camel_policy["tags"] = boto3_tag_list_to_ansible_dict(policy.get("Tags", []))
+ return camel_policy
+
+
+def normalize_iam_group(group):
+ """Converts IAM Groups from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not group:
+ return group
+ camel_group = camel_dict_to_snake_dict(normalize_boto3_result(group))
+ return camel_group
+
+
+def normalize_iam_access_key(access_key):
+ """Converts IAM access keys from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not access_key:
+ return access_key
+ camel_key = camel_dict_to_snake_dict(normalize_boto3_result(access_key))
+ return camel_key
+
+
+def normalize_iam_access_keys(access_keys):
+ """Converts a list of IAM access keys from the CamelCase boto3 format to the snake_case Ansible format"""
+ if not access_keys:
+ return []
+ access_keys = [normalize_iam_access_key(k) for k in access_keys]
+ sorted_keys = sorted(access_keys, key=lambda d: d.get("create_date", None))
+ return sorted_keys
+
+
+def convert_managed_policy_names_to_arns(client, policy_names):
+ if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None):
+ return policy_names
+ allpolicies = {}
+ policies = list_iam_managed_policies(client)
+
+ for policy in policies:
+ allpolicies[policy["PolicyName"]] = policy["Arn"]
+ allpolicies[policy["Arn"]] = policy["Arn"]
+ try:
+ return [allpolicies[policy] for policy in policy_names if policy is not None]
+ except KeyError as e:
+ raise AnsibleIAMError(message="Failed to find policy by name:" + str(e), exception=e) from e
def get_aws_account_id(module):
- """ Given an AnsibleAWSModule instance, get the active AWS account ID
- """
+ """Given an AnsibleAWSModule instance, get the active AWS account ID"""
return get_aws_account_info(module)[0]
@@ -40,36 +296,204 @@ def get_aws_account_info(module):
account_id = None
partition = None
try:
- sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+ sts_client = module.client("sts", retry_decorator=AWSRetry.jittered_backoff())
caller_id = sts_client.get_caller_identity(aws_retry=True)
- account_id = caller_id.get('Account')
- partition = caller_id.get('Arn').split(':')[1]
+ account_id = caller_id.get("Account")
+ partition = caller_id.get("Arn").split(":")[1]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
try:
- iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
- _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
- except is_boto3_error_code('AccessDenied') as e:
+ iam_client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff())
+ _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)["User"][
+ "Arn"
+ ].split(":")
+ except is_boto3_error_code("AccessDenied") as e:
try:
except_msg = to_native(e.message)
except AttributeError:
except_msg = to_native(e)
result = parse_aws_arn(except_msg)
- if result is None or result['service'] != 'iam':
+ if result is None or result["service"] != "iam":
module.fail_json_aws(
e,
- msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
)
- account_id = result.get('account_id')
- partition = result.get('partition')
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ account_id = result.get("account_id")
+ partition = result.get("partition")
+ except ( # pylint: disable=duplicate-except
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e:
module.fail_json_aws(
e,
- msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
)
if account_id is None or partition is None:
module.fail_json(
- msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
)
return (to_native(account_id), to_native(partition))
+
+
+@IAMErrorHandler.common_error_handler("create instance profile")
+@AWSRetry.jittered_backoff()
+def create_iam_instance_profile(client, name, path, tags):
+ boto3_tags = ansible_dict_to_boto3_tag_list(tags or {})
+ path = path or "/"
+ result = client.create_instance_profile(InstanceProfileName=name, Path=path, Tags=boto3_tags)
+ return result["InstanceProfile"]
+
+
+@IAMErrorHandler.deletion_error_handler("delete instance profile")
+@AWSRetry.jittered_backoff()
+def delete_iam_instance_profile(client, name):
+ client.delete_instance_profile(InstanceProfileName=name)
+ # Error Handler will return False if the resource didn't exist
+ return True
+
+
+@IAMErrorHandler.common_error_handler("add role to instance profile")
+@AWSRetry.jittered_backoff()
+def add_role_to_iam_instance_profile(client, profile_name, role_name):
+ client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("remove role from instance profile")
+@AWSRetry.jittered_backoff()
+def remove_role_from_iam_instance_profile(client, profile_name, role_name):
+ client.remove_role_from_instance_profile(InstanceProfileName=profile_name, RoleName=role_name)
+ # Error Handler will return False if the resource didn't exist
+ return True
+
+
+@IAMErrorHandler.list_error_handler("list instance profiles", [])
+def list_iam_instance_profiles(client, name=None, prefix=None, role=None):
+ """
+ Returns a list of IAM instance profiles in boto3 format.
+ Profiles need to be converted to Ansible format using normalize_iam_instance_profile before being displayed.
+
+ See also: normalize_iam_instance_profile
+ """
+ if role:
+ return _list_iam_instance_profiles_for_role(client, RoleName=role)
+ if name:
+ # Unlike the others this returns a single result, make this a list with 1 element.
+ return [_get_iam_instance_profiles(client, InstanceProfileName=name)]
+ if prefix:
+ return _list_iam_instance_profiles(client, PathPrefix=prefix)
+ return _list_iam_instance_profiles(client)
+
+
+def normalize_iam_instance_profile(profile, _v7_compat=False):
+ """
+ Converts a boto3 format IAM instance profile into "Ansible" format
+
+ _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
+ """
+
+ new_profile = camel_dict_to_snake_dict(deepcopy(profile))
+ if profile.get("Roles"):
+ new_profile["roles"] = [normalize_iam_role(role, _v7_compat=_v7_compat) for role in profile.get("Roles")]
+ if profile.get("Tags"):
+ new_profile["tags"] = boto3_tag_list_to_ansible_dict(profile.get("Tags"))
+ else:
+ new_profile["tags"] = {}
+ new_profile["original"] = profile
+ return new_profile
+
+
+def normalize_iam_role(role, _v7_compat=False):
+ """
+ Converts a boto3 format IAM instance role into "Ansible" format
+
+ _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
+ """
+
+ new_role = camel_dict_to_snake_dict(deepcopy(role))
+ if role.get("InstanceProfiles"):
+ new_role["instance_profiles"] = [
+ normalize_iam_instance_profile(profile, _v7_compat=_v7_compat) for profile in role.get("InstanceProfiles")
+ ]
+ if role.get("AssumeRolePolicyDocument"):
+ if _v7_compat:
+ # new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument")
+ new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument")
+ else:
+ new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument")
+
+ new_role["tags"] = boto3_tag_list_to_ansible_dict(role.get("Tags", []))
+ return new_role
+
+
+@IAMErrorHandler.common_error_handler("tag instance profile")
+@AWSRetry.jittered_backoff()
+def tag_iam_instance_profile(client, name, tags):
+ if not tags:
+ return
+ boto3_tags = ansible_dict_to_boto3_tag_list(tags or {})
+ result = client.tag_instance_profile(InstanceProfileName=name, Tags=boto3_tags)
+
+
+@IAMErrorHandler.common_error_handler("untag instance profile")
+@AWSRetry.jittered_backoff()
+def untag_iam_instance_profile(client, name, tags):
+ if not tags:
+ return
+ client.untag_instance_profile(InstanceProfileName=name, TagKeys=tags)
+
+
+@IAMErrorHandler.common_error_handler("tag managed policy")
+@AWSRetry.jittered_backoff()
+def tag_iam_policy(client, arn, tags):
+ if not tags:
+ return
+ boto3_tags = ansible_dict_to_boto3_tag_list(tags or {})
+ client.tag_policy(PolicyArn=arn, Tags=boto3_tags)
+
+
+@IAMErrorHandler.common_error_handler("untag managed policy")
+@AWSRetry.jittered_backoff()
+def untag_iam_policy(client, arn, tags):
+ if not tags:
+ return
+ client.untag_policy(PolicyArn=arn, TagKeys=tags)
+
+
+def _validate_iam_name(resource_type, name=None):
+ if name is None:
+ return None
+ LENGTHS = {"role": 64, "user": 64}
+ regex = r"[\w+=,.@-]+"
+ max_length = LENGTHS.get(resource_type, 128)
+ if len(name) > max_length:
+ return f"Length of {resource_type} name may not exceed {max_length}"
+ if not re.fullmatch(regex, name):
+ return f"{resource_type} name must match pattern {regex}"
+ return None
+
+
+def _validate_iam_path(resource_type, path=None):
+ if path is None:
+ return None
+ regex = r"\/([\w+=,.@-]+\/)*"
+ max_length = 512
+ if len(path) > max_length:
+ return f"Length of {resource_type} path may not exceed {max_length}"
+ if not path.endswith("/") or not path.startswith("/"):
+ return f"{resource_type} path must begin and end with /"
+ if not re.fullmatch(regex, path):
+ return f"{resource_type} path must match pattern {regex}"
+ return None
+
+
+def validate_iam_identifiers(resource_type, name=None, path=None):
+ name_problem = _validate_iam_name(resource_type, name)
+ if name_problem:
+ return name_problem
+ path_problem = _validate_iam_path(resource_type, path)
+ if path_problem:
+ return path_problem
+
+ return None
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
index 7d4ba717f..8a2ff3c0b 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/modules.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -1,20 +1,7 @@
-#
+# -*- coding: utf-8 -*-
+
# Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""This module adds shared support for generic Amazon AWS modules
@@ -50,41 +37,38 @@ The call will be retried the specified number of times, so the calling functions
don't need to be wrapped in the backoff decorator.
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from functools import wraps
import logging
import os
import re
import traceback
-
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
+from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible.module_utils._text import to_native
-from .botocore import HAS_BOTO3
+from .botocore import boto3_at_least
from .botocore import boto3_conn
+from .botocore import botocore_at_least
+from .botocore import check_sdk_version_supported
+from .botocore import gather_sdk_versions
from .botocore import get_aws_connection_info
from .botocore import get_aws_region
-from .botocore import gather_sdk_versions
-
-from .version import LooseVersion
+from .exceptions import AnsibleBotocoreError
+from .retries import RetryingBotoClientWrapper
# Currently only AnsibleAWSModule. However we have a lot of Copy and Paste code
# for Inventory and Lookup modules which we should refactor
-class AnsibleAWSModule(object):
+class AnsibleAWSModule:
"""An ansible module class for AWS modules
AnsibleAWSModule provides an a class for building modules which
@@ -95,12 +79,8 @@ class AnsibleAWSModule(object):
(available on #ansible-aws on IRC) to request the additional
features needed.
"""
- default_settings = {
- "default_args": True,
- "check_boto3": True,
- "auto_retry": True,
- "module_class": AnsibleModule
- }
+
+ default_settings = {"default_args": True, "check_boto3": True, "auto_retry": True, "module_class": AnsibleModule}
def __init__(self, **kwargs):
local_settings = {}
@@ -122,40 +102,40 @@ class AnsibleAWSModule(object):
self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
if local_settings["check_boto3"]:
- if not HAS_BOTO3:
- self._module.fail_json(
- msg=missing_required_lib('botocore and boto3'))
- if not self.botocore_at_least('1.21.0'):
- self.warn('botocore < 1.21.0 is not supported or tested.'
- ' Some features may not work.')
- if not self.boto3_at_least("1.18.0"):
- self.warn('boto3 < 1.18.0 is not supported or tested.'
- ' Some features may not work.')
-
- deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY',
- 'EC2_URL', 'S3_URL'}
+ try:
+ check_sdk_version_supported(warn=self.warn)
+ except AnsibleBotocoreError as e:
+ self._module.fail_json(to_native(e))
+
+ deprecated_vars = {"EC2_REGION", "EC2_SECURITY_TOKEN", "EC2_SECRET_KEY", "EC2_ACCESS_KEY", "EC2_URL", "S3_URL"}
if deprecated_vars.intersection(set(os.environ.keys())):
self._module.deprecate(
- "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', "
- "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment "
- "variables has been deprecated. "
- "These variables are currently used for all AWS services which can "
- "cause confusion. We recomend using the relevant module "
- "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', "
- "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' "
- "environment variables can be used instead.",
- date='2024-12-01', collection_name='amazon.aws',
+ (
+ "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', "
+ "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment "
+ "variables has been deprecated. "
+ "These variables are currently used for all AWS services which can "
+ "cause confusion. We recomend using the relevant module "
+ "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', "
+ "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' "
+ "environment variables can be used instead."
+ ),
+ date="2024-12-01",
+ collection_name="amazon.aws",
)
- if 'AWS_SECURITY_TOKEN' in os.environ.keys():
+ if "AWS_SECURITY_TOKEN" in os.environ.keys():
self._module.deprecate(
- "Support for the 'AWS_SECURITY_TOKEN' environment variable "
- "has been deprecated. This variable was based on the original "
- "boto SDK, support for which has now been dropped. "
- "We recommend using the 'session_token' module parameter "
- "or alternatively the 'AWS_SESSION_TOKEN' environment variable "
- "can be used instead.",
- date='2024-12-01', collection_name='amazon.aws',
+ (
+ "Support for the 'AWS_SECURITY_TOKEN' environment variable "
+ "has been deprecated. This variable was based on the original "
+ "boto SDK, support for which has now been dropped. "
+ "We recommend using the 'session_token' module parameter "
+ "or alternatively the 'AWS_SESSION_TOKEN' environment variable "
+ "can be used instead."
+ ),
+ date="2024-12-01",
+ collection_name="amazon.aws",
)
self.check_mode = self._module.check_mode
@@ -164,8 +144,8 @@ class AnsibleAWSModule(object):
self._botocore_endpoint_log_stream = StringIO()
self.logger = None
- if self.params.get('debug_botocore_endpoint_logs'):
- self.logger = logging.getLogger('botocore.endpoint')
+ if self.params.get("debug_botocore_endpoint_logs"):
+ self.logger = logging.getLogger("botocore.endpoint")
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
@@ -175,7 +155,7 @@ class AnsibleAWSModule(object):
def _get_resource_action_list(self):
actions = []
- for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ for ln in self._botocore_endpoint_log_stream.getvalue().split("\n"):
ln = ln.strip()
if not ln:
continue
@@ -183,17 +163,17 @@ class AnsibleAWSModule(object):
if found_operational_request:
operation_request = found_operational_request.group(0)[20:-1]
resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
- actions.append("{0}:{1}".format(resource, operation_request))
+ actions.append(f"{resource}:{operation_request}")
return list(set(actions))
def exit_json(self, *args, **kwargs):
- if self.params.get('debug_botocore_endpoint_logs'):
- kwargs['resource_actions'] = self._get_resource_action_list()
+ if self.params.get("debug_botocore_endpoint_logs"):
+ kwargs["resource_actions"] = self._get_resource_action_list()
return self._module.exit_json(*args, **kwargs)
def fail_json(self, *args, **kwargs):
- if self.params.get('debug_botocore_endpoint_logs'):
- kwargs['resource_actions'] = self._get_resource_action_list()
+ if self.params.get("debug_botocore_endpoint_logs"):
+ kwargs["resource_actions"] = self._get_resource_action_list()
return self._module.fail_json(*args, **kwargs)
def debug(self, *args, **kwargs):
@@ -211,16 +191,18 @@ class AnsibleAWSModule(object):
def md5(self, *args, **kwargs):
return self._module.md5(*args, **kwargs)
- def client(self, service, retry_decorator=None):
+ def client(self, service, retry_decorator=None, **extra_params):
region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
- conn = boto3_conn(self, conn_type='client', resource=service,
- region=region, endpoint=endpoint_url, **aws_connect_kwargs)
- return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+ kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+ kw_args.update(extra_params)
+ conn = boto3_conn(self, conn_type="client", resource=service, **kw_args)
+ return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator)
- def resource(self, service):
+ def resource(self, service, **extra_params):
region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
- return boto3_conn(self, conn_type='resource', resource=service,
- region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+ kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
+ kw_args.update(extra_params)
+ return boto3_conn(self, conn_type="resource", resource=service, **kw_args)
@property
def region(self):
@@ -242,7 +224,7 @@ class AnsibleAWSModule(object):
except_msg = to_native(exception)
if msg is not None:
- message = '{0}: {1}'.format(msg, except_msg)
+ message = f"{msg}: {except_msg}"
else:
message = except_msg
@@ -251,11 +233,7 @@ class AnsibleAWSModule(object):
except AttributeError:
response = None
- failure = dict(
- msg=message,
- exception=last_traceback,
- **self._gather_versions()
- )
+ failure = dict(msg=message, exception=last_traceback, **self._gather_versions())
failure.update(kwargs)
@@ -264,6 +242,12 @@ class AnsibleAWSModule(object):
self.fail_json(**failure)
+ def fail_json_aws_error(self, exception):
+ """A helper to call the right failure mode after catching an AnsibleAWSError"""
+ if exception.exception:
+ self.fail_json_aws(exception.exception, msg=exception.message)
+ self.fail_json(msg=exception.message)
+
def _gather_versions(self):
"""Gather AWS SDK (boto3 and botocore) dependency versions
@@ -287,20 +271,12 @@ class AnsibleAWSModule(object):
"""
if not self.boto3_at_least(desired):
self._module.fail_json(
- msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs),
- **self._gather_versions()
+ msg=missing_required_lib(f"boto3>={desired}", **kwargs),
+ **self._gather_versions(),
)
def boto3_at_least(self, desired):
- """Check if the available boto3 version is greater than or equal to a desired version.
-
- Usage:
- if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
- # conditionally fail on old boto3 versions if a specific feature is not supported
- module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
- """
- existing = self._gather_versions()
- return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+ return boto3_at_least(desired)
def require_botocore_at_least(self, desired, **kwargs):
"""Check if the available botocore version is greater than or equal to a desired version.
@@ -317,55 +293,12 @@ class AnsibleAWSModule(object):
"""
if not self.botocore_at_least(desired):
self._module.fail_json(
- msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs),
- **self._gather_versions()
+ msg=missing_required_lib(f"botocore>={desired}", **kwargs),
+ **self._gather_versions(),
)
def botocore_at_least(self, desired):
- """Check if the available botocore version is greater than or equal to a desired version.
-
- Usage:
- if not module.botocore_at_least('1.2.3'):
- module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
- if not module.botocore_at_least('1.5.3'):
- module.warn('Botocore did not include waiters for Service X before 1.5.3. '
- 'To wait until Service X resources are fully available, update botocore.')
- """
- existing = self._gather_versions()
- return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
-
-
-class _RetryingBotoClientWrapper(object):
- __never_wait = (
- 'get_paginator', 'can_paginate',
- 'get_waiter', 'generate_presigned_url',
- )
-
- def __init__(self, client, retry):
- self.client = client
- self.retry = retry
-
- def _create_optional_retry_wrapper_function(self, unwrapped):
- retrying_wrapper = self.retry(unwrapped)
-
- @wraps(unwrapped)
- def deciding_wrapper(aws_retry=False, *args, **kwargs):
- if aws_retry:
- return retrying_wrapper(*args, **kwargs)
- else:
- return unwrapped(*args, **kwargs)
- return deciding_wrapper
-
- def __getattr__(self, name):
- unwrapped = getattr(self.client, name)
- if name in self.__never_wait:
- return unwrapped
- elif callable(unwrapped):
- wrapped = self._create_optional_retry_wrapper_function(unwrapped)
- setattr(self, name, wrapped)
- return wrapped
- else:
- return unwrapped
+ return botocore_at_least(desired)
def _aws_common_argument_spec():
@@ -376,55 +309,58 @@ def _aws_common_argument_spec():
"""
return dict(
access_key=dict(
- aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'],
+ aliases=["aws_access_key_id", "aws_access_key", "ec2_access_key"],
deprecated_aliases=[
- dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="ec2_access_key", date="2024-12-01", collection_name="amazon.aws"),
],
+ fallback=(env_fallback, ["AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", "EC2_ACCESS_KEY"]),
no_log=False,
),
secret_key=dict(
- aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'],
+ aliases=["aws_secret_access_key", "aws_secret_key", "ec2_secret_key"],
deprecated_aliases=[
- dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="ec2_secret_key", date="2024-12-01", collection_name="amazon.aws"),
],
+ fallback=(env_fallback, ["AWS_SECRET_ACCESS_KEY", "AWS_SECRET_KEY", "EC2_SECRET_KEY"]),
no_log=True,
),
session_token=dict(
- aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'],
+ aliases=["aws_session_token", "security_token", "access_token", "aws_security_token"],
deprecated_aliases=[
- dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'),
- dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'),
- dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="access_token", date="2024-12-01", collection_name="amazon.aws"),
+ dict(name="security_token", date="2024-12-01", collection_name="amazon.aws"),
+ dict(name="aws_security_token", date="2024-12-01", collection_name="amazon.aws"),
],
+ fallback=(env_fallback, ["AWS_SESSION_TOKEN", "AWS_SECURITY_TOKEN", "EC2_SECURITY_TOKEN"]),
no_log=True,
),
profile=dict(
- aliases=['aws_profile'],
+ aliases=["aws_profile"],
+ fallback=(env_fallback, ["AWS_PROFILE", "AWS_DEFAULT_PROFILE"]),
),
-
endpoint_url=dict(
- aliases=['aws_endpoint_url', 'ec2_url', 's3_url'],
+ aliases=["aws_endpoint_url", "ec2_url", "s3_url"],
deprecated_aliases=[
- dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'),
- dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="ec2_url", date="2024-12-01", collection_name="amazon.aws"),
+ dict(name="s3_url", date="2024-12-01", collection_name="amazon.aws"),
],
- fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']),
+ fallback=(env_fallback, ["AWS_URL", "EC2_URL", "S3_URL"]),
),
validate_certs=dict(
- type='bool',
+ type="bool",
default=True,
),
aws_ca_bundle=dict(
- type='path',
- fallback=(env_fallback, ['AWS_CA_BUNDLE']),
+ type="path",
+ fallback=(env_fallback, ["AWS_CA_BUNDLE"]),
),
aws_config=dict(
- type='dict',
+ type="dict",
),
debug_botocore_endpoint_logs=dict(
- type='bool',
+ type="bool",
default=False,
- fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']),
+ fallback=(env_fallback, ["ANSIBLE_DEBUG_BOTOCORE_LOGS"]),
),
)
@@ -435,11 +371,11 @@ def aws_argument_spec():
"""
region_spec = dict(
region=dict(
- aliases=['aws_region', 'ec2_region'],
+ aliases=["aws_region", "ec2_region"],
deprecated_aliases=[
- dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="ec2_region", date="2024-12-01", collection_name="amazon.aws"),
],
- fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']),
+ fallback=(env_fallback, ["AWS_REGION", "AWS_DEFAULT_REGION", "EC2_REGION"]),
),
)
spec = _aws_common_argument_spec()
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
index 4aeabd5f2..60b096f84 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/policy.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -26,33 +28,57 @@
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
from functools import cmp_to_key
+import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils._text import to_text
from ansible.module_utils.six import binary_type
from ansible.module_utils.six import string_types
+def _canonify_root_arn(arn):
+ # There are multiple ways to specifiy delegation of access to an account
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts
+ if arn.startswith("arn:aws:iam::") and arn.endswith(":root"):
+ arn = arn.split(":")[4]
+ return arn
+
+
+def _canonify_policy_dict_item(item, key):
+ """
+ Converts special cases where there are multiple ways to write the same thing into a single form
+ """
+ # There are multiple ways to specify anonymous principals
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-anonymous
+ if key in ["NotPrincipal", "Principal"]:
+ if item == "*":
+ return {"AWS": "*"}
+ return item
+
+
+def _tuplify_list(element):
+ if isinstance(element, list):
+ return tuple(element)
+ return element
+
+
def _hashable_policy(policy, policy_list):
"""
- Takes a policy and returns a list, the contents of which are all hashable and sorted.
- Example input policy:
- {'Version': '2012-10-17',
- 'Statement': [{'Action': 's3:PutObjectAcl',
- 'Sid': 'AddCannedAcl2',
- 'Resource': 'arn:aws:s3:::test_policy/*',
- 'Effect': 'Allow',
- 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
- }]}
- Returned value:
- [('Statement', ((('Action', ('s3:PutObjectAcl',)),
- ('Effect', ('Allow',)),
- ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
- ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))),
- ('Version', ('2012-10-17',)))]
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', ('s3:PutObjectAcl',)),
+ ('Effect', ('Allow',)),
+ ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))),
+ ('Version', ('2012-10-17',)))]
"""
# Amazon will automatically convert bool and int to strings for us
@@ -63,30 +89,24 @@ def _hashable_policy(policy, policy_list):
if isinstance(policy, list):
for each in policy:
- tupleified = _hashable_policy(each, [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
+ hashed_policy = _hashable_policy(each, [])
+ tupleified = _tuplify_list(hashed_policy)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
- if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
- policy = policy.split(':')[4]
+ policy = _canonify_root_arn(policy)
return [policy]
elif isinstance(policy, dict):
+ # Sort the keys to ensure a consistent order for later comparison
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
- element = policy[key]
- # Special case defined in
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
- if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
- element = {"AWS": "*"}
- tupleified = _hashable_policy(element, [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
+ # Converts special cases to a consistent form
+ element = _canonify_policy_dict_item(policy[key], key)
+ hashed_policy = _hashable_policy(element, [])
+ tupleified = _tuplify_list(hashed_policy)
policy_list.append((key, tupleified))
-
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
@@ -96,7 +116,7 @@ def _hashable_policy(policy, policy_list):
def _py3cmp(a, b):
- """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ """Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
try:
if a > b:
return 1
@@ -107,8 +127,8 @@ def _py3cmp(a, b):
except TypeError as e:
# check to see if they're tuple-string
# always say strings are less than tuples (to maintain compatibility with python2)
- str_ind = to_text(e).find('str')
- tup_ind = to_text(e).find('tuple')
+ str_ind = to_text(e).find("str")
+ tup_ind = to_text(e).find("tuple")
if -1 not in (str_ind, tup_ind):
if str_ind < tup_ind:
return -1
@@ -118,8 +138,8 @@ def _py3cmp(a, b):
def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
- """ Compares the existing policy and the updated policy
- Returns True if there is a difference between policies.
+ """Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
"""
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
if default_version:
@@ -134,8 +154,10 @@ def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
def sort_json_policy_dict(policy_dict):
+ """
+ DEPRECATED - will be removed in amazon.aws 8.0.0
- """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
@@ -151,8 +173,16 @@ def sort_json_policy_dict(policy_dict):
}
"""
- def value_is_list(my_list):
+ ansible_warnings.deprecate(
+ (
+ "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using "
+ "amazon.aws.module_utils.policy.compare_policies instead"
+ ),
+ version="8.0.0",
+ collection_name="amazon.aws",
+ )
+ def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
index 8b5bcb67c..85cde2e4e 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/rds.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -1,54 +1,85 @@
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
from collections import namedtuple
from time import sleep
try:
- from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import WaiterError
except ImportError:
pass
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from .ec2 import AWSRetry
-from .ec2 import ansible_dict_to_boto3_tag_list
-from .ec2 import boto3_tag_list_to_ansible_dict
-from .ec2 import compare_aws_tags
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
+from .tagging import compare_aws_tags
from .waiters import get_waiter
-Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes'])
+Boto3ClientMethod = namedtuple(
+ "Boto3ClientMethod", ["name", "waiter", "operation_description", "resource", "retry_codes"]
+)
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
- 'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3',
- 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
- 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
+ "create_db_cluster",
+ "restore_db_cluster_from_snapshot",
+ "restore_db_cluster_from_s3",
+ "restore_db_cluster_to_point_in_time",
+ "modify_db_cluster",
+ "delete_db_cluster",
+ "add_tags_to_resource",
+ "remove_tags_from_resource",
+ "list_tags_for_resource",
+ "promote_read_replica_db_cluster",
+ "stop_db_cluster",
+ "start_db_cluster",
]
instance_method_names = [
- 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
- 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
- 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
- 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance',
- 'remove_role_from_db_instance'
+ "create_db_instance",
+ "restore_db_instance_to_point_in_time",
+ "restore_db_instance_from_s3",
+ "restore_db_instance_from_db_snapshot",
+ "create_db_instance_read_replica",
+ "modify_db_instance",
+ "delete_db_instance",
+ "add_tags_to_resource",
+ "remove_tags_from_resource",
+ "list_tags_for_resource",
+ "promote_read_replica",
+ "stop_db_instance",
+ "start_db_instance",
+ "reboot_db_instance",
+ "add_role_to_db_instance",
+ "remove_role_from_db_instance",
]
cluster_snapshot_method_names = [
- 'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
- 'list_tags_for_resource', 'copy_db_cluster_snapshot'
+ "create_db_cluster_snapshot",
+ "delete_db_cluster_snapshot",
+ "add_tags_to_resource",
+ "remove_tags_from_resource",
+ "list_tags_for_resource",
+ "copy_db_cluster_snapshot",
]
instance_snapshot_method_names = [
- 'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource',
- 'copy_db_snapshot', 'list_tags_for_resource'
+ "create_db_snapshot",
+ "delete_db_snapshot",
+ "add_tags_to_resource",
+ "remove_tags_from_resource",
+ "copy_db_snapshot",
+ "list_tags_for_resource",
]
def get_rds_method_attribute(method_name, module):
- '''
+ """
Returns rds attributes of the specified method.
Parameters:
@@ -66,134 +97,152 @@ def get_rds_method_attribute(method_name, module):
Raises:
NotImplementedError if wait is True but no waiter can be found for specified method
- '''
- waiter = ''
- readable_op = method_name.replace('_', ' ').replace('db', 'DB')
- resource = ''
+ """
+ waiter = ""
+ readable_op = method_name.replace("_", " ").replace("db", "DB")
+ resource = ""
retry_codes = []
- if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
- resource = 'cluster'
- if method_name == 'delete_db_cluster':
- waiter = 'cluster_deleted'
+ if method_name in cluster_method_names and "new_db_cluster_identifier" in module.params:
+ resource = "cluster"
+ if method_name == "delete_db_cluster":
+ waiter = "cluster_deleted"
else:
- waiter = 'cluster_available'
+ waiter = "cluster_available"
# Handle retry codes
- if method_name == 'restore_db_cluster_from_snapshot':
- retry_codes = ['InvalidDBClusterSnapshotState']
+ if method_name == "restore_db_cluster_from_snapshot":
+ retry_codes = ["InvalidDBClusterSnapshotState"]
else:
- retry_codes = ['InvalidDBClusterState']
- elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
- resource = 'instance'
- if method_name == 'delete_db_instance':
- waiter = 'db_instance_deleted'
- elif method_name == 'stop_db_instance':
- waiter = 'db_instance_stopped'
- elif method_name == 'add_role_to_db_instance':
- waiter = 'role_associated'
- elif method_name == 'remove_role_from_db_instance':
- waiter = 'role_disassociated'
- elif method_name == 'promote_read_replica':
- waiter = 'read_replica_promoted'
+ retry_codes = ["InvalidDBClusterState"]
+ elif method_name in instance_method_names and "new_db_instance_identifier" in module.params:
+ resource = "instance"
+ if method_name == "delete_db_instance":
+ waiter = "db_instance_deleted"
+ elif method_name == "stop_db_instance":
+ waiter = "db_instance_stopped"
+ elif method_name == "add_role_to_db_instance":
+ waiter = "role_associated"
+ elif method_name == "remove_role_from_db_instance":
+ waiter = "role_disassociated"
+ elif method_name == "promote_read_replica":
+ waiter = "read_replica_promoted"
+ elif method_name == "db_cluster_promoting":
+ waiter = "db_cluster_promoting"
else:
- waiter = 'db_instance_available'
+ waiter = "db_instance_available"
# Handle retry codes
- if method_name == 'restore_db_instance_from_db_snapshot':
- retry_codes = ['InvalidDBSnapshotState']
+ if method_name == "restore_db_instance_from_db_snapshot":
+ retry_codes = ["InvalidDBSnapshotState"]
else:
- retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
- elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params:
- resource = 'cluster_snapshot'
- if method_name == 'delete_db_cluster_snapshot':
- waiter = 'db_cluster_snapshot_deleted'
- retry_codes = ['InvalidDBClusterSnapshotState']
- elif method_name == 'create_db_cluster_snapshot':
- waiter = 'db_cluster_snapshot_available'
- retry_codes = ['InvalidDBClusterState']
+ retry_codes = ["InvalidDBInstanceState", "InvalidDBSecurityGroupState"]
+ elif method_name in cluster_snapshot_method_names and "db_cluster_snapshot_identifier" in module.params:
+ resource = "cluster_snapshot"
+ if method_name == "delete_db_cluster_snapshot":
+ waiter = "db_cluster_snapshot_deleted"
+ retry_codes = ["InvalidDBClusterSnapshotState"]
+ elif method_name == "create_db_cluster_snapshot":
+ waiter = "db_cluster_snapshot_available"
+ retry_codes = ["InvalidDBClusterState"]
else:
# Tagging
- waiter = 'db_cluster_snapshot_available'
- retry_codes = ['InvalidDBClusterSnapshotState']
- elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params:
- resource = 'instance_snapshot'
- if method_name == 'delete_db_snapshot':
- waiter = 'db_snapshot_deleted'
- retry_codes = ['InvalidDBSnapshotState']
- elif method_name == 'create_db_snapshot':
- waiter = 'db_snapshot_available'
- retry_codes = ['InvalidDBInstanceState']
+ waiter = "db_cluster_snapshot_available"
+ retry_codes = ["InvalidDBClusterSnapshotState"]
+ elif method_name in instance_snapshot_method_names and "db_snapshot_identifier" in module.params:
+ resource = "instance_snapshot"
+ if method_name == "delete_db_snapshot":
+ waiter = "db_snapshot_deleted"
+ retry_codes = ["InvalidDBSnapshotState"]
+ elif method_name == "create_db_snapshot":
+ waiter = "db_snapshot_available"
+ retry_codes = ["InvalidDBInstanceState"]
else:
# Tagging
- waiter = 'db_snapshot_available'
- retry_codes = ['InvalidDBSnapshotState']
+ waiter = "db_snapshot_available"
+ retry_codes = ["InvalidDBSnapshotState"]
else:
- if module.params.get('wait'):
- raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))
+ if module.params.get("wait"):
+ raise NotImplementedError(
+ f"method {method_name} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py",
+ )
- return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op,
- resource=resource, retry_codes=retry_codes)
+ return Boto3ClientMethod(
+ name=method_name, waiter=waiter, operation_description=readable_op, resource=resource, retry_codes=retry_codes
+ )
def get_final_identifier(method_name, module):
updated_identifier = None
- apply_immediately = module.params.get('apply_immediately')
+ apply_immediately = module.params.get("apply_immediately")
resource = get_rds_method_attribute(method_name, module).resource
- if resource == 'cluster':
- identifier = module.params['db_cluster_identifier']
- updated_identifier = module.params['new_db_cluster_identifier']
- elif resource == 'instance':
- identifier = module.params['db_instance_identifier']
- updated_identifier = module.params['new_db_instance_identifier']
- elif resource == 'instance_snapshot':
- identifier = module.params['db_snapshot_identifier']
- elif resource == 'cluster_snapshot':
- identifier = module.params['db_cluster_snapshot_identifier']
+ if resource == "cluster":
+ identifier = module.params["db_cluster_identifier"]
+ updated_identifier = module.params["new_db_cluster_identifier"]
+ elif resource == "instance":
+ identifier = module.params["db_instance_identifier"]
+ updated_identifier = module.params["new_db_instance_identifier"]
+ elif resource == "instance_snapshot":
+ identifier = module.params["db_snapshot_identifier"]
+ elif resource == "cluster_snapshot":
+ identifier = module.params["db_cluster_snapshot_identifier"]
else:
- raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))
+ raise NotImplementedError(
+ f"method {method_name} hasn't been added to the list of accepted methods in module_utils/rds.py",
+ )
if not module.check_mode and updated_identifier and apply_immediately:
identifier = updated_identifier
return identifier
def handle_errors(module, exception, method_name, parameters):
-
if not isinstance(exception, ClientError):
- module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
+ module.fail_json_aws(exception, msg=f"Unexpected failure for method {method_name} with parameters {parameters}")
changed = True
- error_code = exception.response['Error']['Code']
- if (
- method_name in ('modify_db_instance', 'modify_db_cluster') and
- error_code == 'InvalidParameterCombination'
- ):
- if 'No modifications were requested' in to_text(exception):
+ error_code = exception.response["Error"]["Code"]
+ if method_name in ("modify_db_instance", "modify_db_cluster") and error_code == "InvalidParameterCombination":
+ if "No modifications were requested" in to_text(exception):
changed = False
- elif 'ModifyDbCluster API' in to_text(exception):
- module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
+ elif "ModifyDbCluster API" in to_text(exception):
+ module.fail_json_aws(
+ exception,
+ msg="It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster",
+ )
else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
- elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
- if 'DB Instance is not a read replica' in to_text(exception):
+ module.fail_json_aws(
+ exception,
+ msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}",
+ )
+ elif method_name == "promote_read_replica" and error_code == "InvalidDBInstanceState":
+ if "DB Instance is not a read replica" in to_text(exception):
changed = False
else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
- elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault':
- if 'DB Cluster that is not a read replica' in to_text(exception):
+ module.fail_json_aws(
+ exception,
+ msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}",
+ )
+ elif method_name == "promote_read_replica_db_cluster" and error_code == "InvalidDBClusterStateFault":
+ if "DB Cluster that is not a read replica" in to_text(exception):
changed = False
else:
module.fail_json_aws(
exception,
- msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description),
+ msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}",
)
elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue":
accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"]
if parameters.get("Engine") not in accepted_engines:
module.fail_json_aws(
- exception, msg="DB engine {0} should be one of {1}".format(parameters.get("Engine"), accepted_engines)
+ exception, msg=f"DB engine {parameters.get('Engine')} should be one of {accepted_engines}"
)
else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ module.fail_json_aws(
+ exception,
+ msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}",
+ )
else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
+ module.fail_json_aws(
+ exception,
+ msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}",
+ )
return changed
@@ -202,7 +251,7 @@ def call_method(client, module, method_name, parameters):
result = {}
changed = True
if not module.check_mode:
- wait = module.params.get('wait')
+ wait = module.params.get("wait")
retry_codes = get_rds_method_attribute(method_name, module).retry_codes
method = getattr(client, method_name)
try:
@@ -223,26 +272,26 @@ def wait_for_instance_status(client, module, db_instance_id, waiter_name):
except ValueError:
# using a waiter in module_utils/waiters.py
waiter = get_waiter(client, waiter_name)
- waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
+ waiter.wait(WaiterConfig={"Delay": 60, "MaxAttempts": 60}, DBInstanceIdentifier=db_instance_id)
waiter_expected_status = {
- 'db_instance_deleted': 'deleted',
- 'db_instance_stopped': 'stopped',
+ "db_instance_deleted": "deleted",
+ "db_instance_stopped": "stopped",
}
- expected_status = waiter_expected_status.get(waiter_name, 'available')
+ expected_status = waiter_expected_status.get(waiter_name, "available")
for _wait_attempts in range(0, 10):
try:
wait(client, db_instance_id, waiter_name)
break
except WaiterError as e:
# Instance may be renamed and AWSRetry doesn't handle WaiterError
- if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
+ if e.last_response.get("Error", {}).get("Code") == "DBInstanceNotFound":
sleep(10)
continue
- module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
+ module.fail_json_aws(e, msg=f"Error while waiting for DB instance {db_instance_id} to be {expected_status}")
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
- db_instance_id, expected_status)
+ module.fail_json_aws(
+ e, msg=f"Unexpected error while waiting for DB instance {db_instance_id} to be {expected_status}"
)
@@ -250,39 +299,44 @@ def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
try:
get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
except WaiterError as e:
- if waiter_name == 'cluster_deleted':
- msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
+ if waiter_name == "cluster_deleted":
+ msg = f"Failed to wait for DB cluster {db_cluster_id} to be deleted"
else:
- msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
+ msg = f"Failed to wait for DB cluster {db_cluster_id} to be available"
module.fail_json_aws(e, msg=msg)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
+ module.fail_json_aws(e, msg=f"Failed with an unexpected error while waiting for the DB cluster {db_cluster_id}")
def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name):
try:
client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id)
except WaiterError as e:
- if waiter_name == 'db_snapshot_deleted':
- msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id)
+ if waiter_name == "db_snapshot_deleted":
+ msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be deleted"
else:
- msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id)
+ msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be available"
module.fail_json_aws(e, msg=msg)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id))
+ module.fail_json_aws(
+ e, msg=f"Failed with an unexpected error while waiting for the DB snapshot {db_snapshot_id}"
+ )
def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name):
try:
client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id)
except WaiterError as e:
- if waiter_name == 'db_cluster_snapshot_deleted':
- msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id)
+ if waiter_name == "db_cluster_snapshot_deleted":
+ msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be deleted"
else:
- msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id)
+ msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be available"
module.fail_json_aws(e, msg=msg)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id))
+ module.fail_json_aws(
+ e,
+ msg=f"Failed with an unexpected error while waiting for the DB cluster snapshot {db_snapshot_id}",
+ )
def wait_for_status(client, module, identifier, method_name):
@@ -290,39 +344,37 @@ def wait_for_status(client, module, identifier, method_name):
waiter_name = rds_method_attributes.waiter
resource = rds_method_attributes.resource
- if resource == 'cluster':
+ if resource == "cluster":
wait_for_cluster_status(client, module, identifier, waiter_name)
- elif resource == 'instance':
+ elif resource == "instance":
wait_for_instance_status(client, module, identifier, waiter_name)
- elif resource == 'instance_snapshot':
+ elif resource == "instance_snapshot":
wait_for_instance_snapshot_status(client, module, identifier, waiter_name)
- elif resource == 'cluster_snapshot':
+ elif resource == "cluster_snapshot":
wait_for_cluster_snapshot_status(client, module, identifier, waiter_name)
def get_tags(client, module, resource_arn):
try:
- return boto3_tag_list_to_ansible_dict(
- client.list_tags_for_resource(ResourceName=resource_arn)['TagList']
- )
+ return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=resource_arn)["TagList"])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Unable to describe tags")
def arg_spec_to_rds_params(options_dict):
- tags = options_dict.pop('tags')
+ tags = options_dict.pop("tags")
has_processor_features = False
- if 'processor_features' in options_dict:
+ if "processor_features" in options_dict:
has_processor_features = True
- processor_features = options_dict.pop('processor_features')
+ processor_features = options_dict.pop("processor_features")
camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
for key in list(camel_options.keys()):
- for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
+ for old, new in (("Db", "DB"), ("Iam", "IAM"), ("Az", "AZ"), ("Ca", "CA")):
if old in key:
camel_options[key.replace(old, new)] = camel_options.pop(key)
- camel_options['Tags'] = tags
+ camel_options["Tags"] = tags
if has_processor_features:
- camel_options['ProcessorFeatures'] = processor_features
+ camel_options["ProcessorFeatures"] = processor_features
return camel_options
@@ -333,19 +385,23 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
changed = bool(tags_to_add or tags_to_remove)
if tags_to_add:
call_method(
- client, module, method_name='add_tags_to_resource',
- parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
+ client,
+ module,
+ method_name="add_tags_to_resource",
+ parameters={"ResourceName": resource_arn, "Tags": ansible_dict_to_boto3_tag_list(tags_to_add)},
)
if tags_to_remove:
call_method(
- client, module, method_name='remove_tags_from_resource',
- parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
+ client,
+ module,
+ method_name="remove_tags_from_resource",
+ parameters={"ResourceName": resource_arn, "TagKeys": tags_to_remove},
)
return changed
def compare_iam_roles(existing_roles, target_roles, purge_roles):
- '''
+ """
Returns differences between target and existing IAM roles
Parameters:
@@ -356,15 +412,15 @@ def compare_iam_roles(existing_roles, target_roles, purge_roles):
Returns:
roles_to_add (list): List of IAM roles to add
roles_to_delete (list): List of IAM roles to delete
- '''
- existing_roles = [dict((k, v) for k, v in role.items() if k != 'status') for role in existing_roles]
+ """
+ existing_roles = [dict((k, v) for k, v in role.items() if k != "status") for role in existing_roles]
roles_to_add = [role for role in target_roles if role not in existing_roles]
roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else []
return roles_to_add, roles_to_remove
def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove):
- '''
+ """
Update a DB instance's associated IAM roles
Parameters:
@@ -376,15 +432,11 @@ def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove)
Returns:
changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not
- '''
+ """
for role in roles_to_remove:
- params = {'DBInstanceIdentifier': instance_id,
- 'RoleArn': role['role_arn'],
- 'FeatureName': role['feature_name']}
- _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params)
+ params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]}
+ _result, changed = call_method(client, module, method_name="remove_role_from_db_instance", parameters=params)
for role in roles_to_add:
- params = {'DBInstanceIdentifier': instance_id,
- 'RoleArn': role['role_arn'],
- 'FeatureName': role['feature_name']}
- _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params)
+ params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]}
+ _result, changed = call_method(client, module, method_name="add_role_to_db_instance", parameters=params)
return changed
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
index 1bd214b6b..110b1c8aa 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/retries.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -26,11 +28,11 @@
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from functools import wraps
try:
from botocore.exceptions import ClientError
+
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
@@ -53,7 +55,7 @@ class AWSRetry(CloudRetry):
@staticmethod
def status_code_from_exception(error):
- return error.response['Error']['Code']
+ return error.response["Error"]["Code"]
@staticmethod
def found(response_code, catch_extra_error_codes=None):
@@ -68,11 +70,51 @@ class AWSRetry(CloudRetry):
#
# https://github.com/boto/boto3/issues/876 (and linked PRs etc)
retry_on = [
- 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
- 'InternalFailure', 'InternalError', 'TooManyRequestsException',
- 'Throttling'
+ "RequestLimitExceeded",
+ "Unavailable",
+ "ServiceUnavailable",
+ "InternalFailure",
+ "InternalError",
+ "TooManyRequestsException",
+ "Throttling",
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
return response_code in retry_on
+
+
+class RetryingBotoClientWrapper:
+ __never_wait = (
+ "get_paginator",
+ "can_paginate",
+ "get_waiter",
+ "generate_presigned_url",
+ )
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(*args, aws_retry=False, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
index 3e2940a53..38e12a52d 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/route53.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
@@ -1,15 +1,14 @@
+# -*- coding: utf-8 -*-
+
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
@@ -24,9 +23,9 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags
change_params = dict()
if tags_to_set:
- change_params['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set)
+ change_params["AddTags"] = ansible_dict_to_boto3_tag_list(tags_to_set)
if tags_to_delete:
- change_params['RemoveTagKeys'] = tags_to_delete
+ change_params["RemoveTagKeys"] = tags_to_delete
if not change_params:
return False
@@ -35,14 +34,14 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags
return True
try:
- client.change_tags_for_resource(
- ResourceType=resource_type,
- ResourceId=resource_id,
- **change_params
- )
+ client.change_tags_for_resource(ResourceType=resource_type, ResourceId=resource_id, **change_params)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type),
- resource_id=resource_id, change_params=change_params)
+ module.fail_json_aws(
+ e,
+ msg=f"Failed to update tags on {resource_type}",
+ resource_id=resource_id,
+ change_params=change_params,
+ )
return True
@@ -52,13 +51,15 @@ def get_tags(module, client, resource_type, resource_id):
ResourceType=resource_type,
ResourceId=resource_id,
)
- except is_boto3_error_code('NoSuchHealthCheck'):
+ except is_boto3_error_code("NoSuchHealthCheck"):
return {}
- except is_boto3_error_code('NoSuchHostedZone'): # pylint: disable=duplicate-except
+ except is_boto3_error_code("NoSuchHostedZone"): # pylint: disable=duplicate-except
return {}
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type),
- resource_id=resource_id)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Failed to fetch tags on {resource_type}", resource_id=resource_id)
- tags = boto3_tag_list_to_ansible_dict(tagset['ResourceTagSet']['Tags'])
+ tags = boto3_tag_list_to_ansible_dict(tagset["ResourceTagSet"]["Tags"])
return tags
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
index c13c91f25..73297ffc7 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/s3.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -1,102 +1,153 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import string
+from urllib.parse import urlparse
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from hashlib import md5
+
+ HAS_MD5 = True
except ImportError:
- pass # Handled by the calling module
+ HAS_MD5 = False
-HAS_MD5 = True
try:
- from hashlib import md5
+ import botocore
except ImportError:
- try:
- from md5 import md5
- except ImportError:
- HAS_MD5 = False
+ pass # Handled by the calling module
-import string
+from ansible.module_utils.basic import to_text
+
+
+def s3_head_objects(client, parts, bucket, obj, versionId):
+ args = {"Bucket": bucket, "Key": obj}
+ if versionId:
+ args["VersionId"] = versionId
+
+ for part in range(1, parts + 1):
+ args["PartNumber"] = part
+ yield client.head_object(**args)
+
+
+def calculate_checksum_with_file(client, parts, bucket, obj, versionId, filename):
+ digests = []
+ with open(filename, "rb") as f:
+ for head in s3_head_objects(client, parts, bucket, obj, versionId):
+ digests.append(md5(f.read(int(head["ContentLength"]))).digest())
+
+ digest_squared = b"".join(digests)
+ return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"'
+
+
+def calculate_checksum_with_content(client, parts, bucket, obj, versionId, content):
+ digests = []
+ offset = 0
+ for head in s3_head_objects(client, parts, bucket, obj, versionId):
+ length = int(head["ContentLength"])
+ digests.append(md5(content[offset:offset + length]).digest()) # fmt: skip
+ offset += length
+
+ digest_squared = b"".join(digests)
+ return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"'
def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
- if '-' in etag:
+ if "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
- parts = int(etag[1:-1].split('-')[1])
- digests = []
-
- s3_kwargs = dict(
- Bucket=bucket,
- Key=obj,
- )
- if version:
- s3_kwargs['VersionId'] = version
-
- with open(filename, 'rb') as f:
- for part_num in range(1, parts + 1):
- s3_kwargs['PartNumber'] = part_num
- try:
- head = s3.head_object(**s3_kwargs)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get head object")
- digests.append(md5(f.read(int(head['ContentLength']))))
-
- digest_squared = md5(b''.join(m.digest() for m in digests))
- return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ parts = int(etag[1:-1].split("-")[1])
+ try:
+ return calculate_checksum_with_file(s3, parts, bucket, obj, version, filename)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
else: # Compute the MD5 sum normally
- return '"{0}"'.format(module.md5(filename))
+ return f'"{module.md5(filename)}"'
def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
- if '-' in etag:
+ if "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
- parts = int(etag[1:-1].split('-')[1])
- digests = []
- offset = 0
-
- s3_kwargs = dict(
- Bucket=bucket,
- Key=obj,
- )
- if version:
- s3_kwargs['VersionId'] = version
-
- for part_num in range(1, parts + 1):
- s3_kwargs['PartNumber'] = part_num
- try:
- head = s3.head_object(**s3_kwargs)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get head object")
- length = int(head['ContentLength'])
- digests.append(md5(content[offset:offset + length]))
- offset += length
-
- digest_squared = md5(b''.join(m.digest() for m in digests))
- return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ parts = int(etag[1:-1].split("-")[1])
+ try:
+ return calculate_checksum_with_content(s3, parts, bucket, obj, version, content)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
else: # Compute the MD5 sum normally
- return '"{0}"'.format(md5(content).hexdigest())
+ return f'"{md5(content).hexdigest()}"'
-def validate_bucket_name(module, name):
+def validate_bucket_name(name):
# See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
if len(name) < 3:
- module.fail_json(msg='the length of an S3 bucket must be at least 3 characters')
+ return "the length of an S3 bucket must be at least 3 characters"
if len(name) > 63:
- module.fail_json(msg='the length of an S3 bucket cannot exceed 63 characters')
+ return "the length of an S3 bucket cannot exceed 63 characters"
legal_characters = string.ascii_lowercase + ".-" + string.digits
illegal_characters = [c for c in name if c not in legal_characters]
if illegal_characters:
- module.fail_json(msg='invalid character(s) found in the bucket name')
+ return "invalid character(s) found in the bucket name"
if name[-1] not in string.ascii_lowercase + string.digits:
- module.fail_json(msg='bucket names must begin and end with a letter or number')
- return True
+ return "bucket names must begin and end with a letter or number"
+ return None
+
+
+# Spot special case of fakes3.
+def is_fakes3(url):
+ """Return True if endpoint_url has scheme fakes3://"""
+ result = False
+ if url is not None:
+ result = urlparse(url).scheme in ("fakes3", "fakes3s")
+ return result
+
+
+def parse_fakes3_endpoint(url):
+ fakes3 = urlparse(url)
+ protocol = "http"
+ port = fakes3.port or 80
+ if fakes3.scheme == "fakes3s":
+ protocol = "https"
+ port = fakes3.port or 443
+ endpoint_url = f"{protocol}://{fakes3.hostname}:{to_text(port)}"
+ use_ssl = bool(fakes3.scheme == "fakes3s")
+ return {"endpoint": endpoint_url, "use_ssl": use_ssl}
+
+
+def parse_ceph_endpoint(url):
+ ceph = urlparse(url)
+ use_ssl = bool(ceph.scheme == "https")
+ return {"endpoint": url, "use_ssl": use_ssl}
+
+
+def parse_s3_endpoint(options):
+ endpoint_url = options.get("endpoint_url")
+ if options.get("ceph"):
+ return False, parse_ceph_endpoint(endpoint_url)
+ if is_fakes3(endpoint_url):
+ return False, parse_fakes3_endpoint(endpoint_url)
+ return True, {"endpoint": endpoint_url}
+
+
+def s3_extra_params(options, sigv4=False):
+ aws, extra_params = parse_s3_endpoint(options)
+ endpoint = extra_params["endpoint"]
+ if not aws:
+ return extra_params
+ dualstack = options.get("dualstack")
+ if not dualstack and not sigv4:
+ return extra_params
+ config = {}
+ if dualstack:
+ config["use_dualstack_endpoint"] = True
+ if sigv4:
+ config["signature_version"] = "s3v4"
+ extra_params["config"] = config
+ return extra_params
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
index 1568e4887..9201c8979 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -26,17 +28,13 @@
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
from ansible.module_utils._text import to_native
from ansible.module_utils._text import to_text
from ansible.module_utils.six import string_types
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
-
- """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ """Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
@@ -60,7 +58,7 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_
if tag_name_key_name and tag_value_key_name:
tag_candidates = {tag_name_key_name: tag_value_key_name}
else:
- tag_candidates = {'key': 'value', 'Key': 'Value'}
+ tag_candidates = {"key": "value", "Key": "Value"}
# minio seems to return [{}] as an empty tags_list
if not tags_list or not any(tag for tag in tags_list):
@@ -68,12 +66,17 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_
for k, v in tag_candidates.items():
if k in tags_list[0] and v in tags_list[0]:
return dict((tag[k], tag[v]) for tag in tags_list)
- raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+ raise ValueError(f"Couldn't find tag key (candidates {str(tag_candidates)}) in tag list {str(tags_list)}")
-def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value_key_name="Value"):
+ """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+
+ Note: booleans are converted to their Capitalized text form ("True" and "False"), this is
+ different to ansible_dict_to_boto3_filter_list because historically we've used "to_text()" and
+ AWS stores tags as strings, whereas for things which are actually booleans in AWS are returned
+ as lowercase strings in filters.
- """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
@@ -104,8 +107,36 @@ def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value
return tags_list
+def _tag_name_to_filter_key(tag_name):
+ return f"tag:{tag_name}"
+
+
+def ansible_dict_to_tag_filter_dict(tags_dict):
+ """Prepends "tag:" to all of the keys (not the values) in a dict
+ This is useful when you're then going to build a filter including the tags.
+
+ Note: booleans are converted to their Capitalized text form ("True" and "False"), this is
+ different to ansible_dict_to_boto3_filter_list because historically we've used "to_text()" and
+ AWS stores tags as strings, whereas for things which are actually booleans in AWS are returned
+ as lowercase strings in filters.
+
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+
+ Basic Usage:
+ >>> filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags))
+
+ Returns:
+ Dict: A dictionary suitable for passing to ansible_dict_to_boto3_filter_list which can
+ also be combined with other common filter parameters.
+ """
+ if not tags_dict:
+ return {}
+ return {_tag_name_to_filter_key(k): to_native(v) for k, v in tags_dict.items()}
+
+
def boto3_tag_specifications(tags_dict, types=None):
- """ Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
+ """Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
resource tags to a TagSpecification object.
https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html
@@ -170,7 +201,7 @@ def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
continue
# Amazon have reserved 'aws:*' tags, we should avoid purging them as
# this probably isn't what people want to do...
- if key.startswith('aws:'):
+ if key.startswith("aws:"):
continue
tag_keys_to_unset.append(key)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
index dd7d9738a..24726d4c2 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/tower.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
@@ -1,9 +1,8 @@
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
import string
import textwrap
@@ -12,7 +11,9 @@ from ansible.module_utils.six.moves.urllib import parse as urlparse
def _windows_callback_script(passwd=None):
- script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+ script_url = (
+ "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"
+ )
if passwd is not None:
passwd = passwd.replace("'", "''")
script_tpl = """\
@@ -72,9 +73,7 @@ def _linux_callback_script(tower_address, template_id, host_config_key):
exit 1
"""
tpl = string.Template(textwrap.dedent(script_tpl))
- return tpl.safe_substitute(tower_address=tower_address,
- template_id=template_id,
- host_config_key=host_config_key)
+ return tpl.safe_substitute(tower_address=tower_address, template_id=template_id, host_config_key=host_config_key)
def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd):
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
index 70d38cd8a..708736fc0 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -26,16 +28,12 @@
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.six import string_types
from ansible.module_utils.six import integer_types
+from ansible.module_utils.six import string_types
def ansible_dict_to_boto3_filter_list(filters_dict):
-
- """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ """Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
@@ -58,15 +56,15 @@ def ansible_dict_to_boto3_filter_list(filters_dict):
filters_list = []
for k, v in filters_dict.items():
- filter_dict = {'Name': k}
+ filter_dict = {"Name": k}
if isinstance(v, bool):
- filter_dict['Values'] = [str(v).lower()]
+ filter_dict["Values"] = [str(v).lower()]
elif isinstance(v, integer_types):
- filter_dict['Values'] = [str(v)]
+ filter_dict["Values"] = [str(v)]
elif isinstance(v, string_types):
- filter_dict['Values'] = [v]
+ filter_dict["Values"] = [v]
else:
- filter_dict['Values'] = v
+ filter_dict["Values"] = v
filters_list.append(filter_dict)
@@ -75,18 +73,18 @@ def ansible_dict_to_boto3_filter_list(filters_dict):
def map_complex_type(complex_type, type_map):
"""
- Allows to cast elements within a dictionary to a specific type
- Example of usage:
+ Allows to cast elements within a dictionary to a specific type
+ Example of usage:
- DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
- 'maximum_percent': 'int',
- 'minimum_healthy_percent': 'int'
- }
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
- deployment_configuration = map_complex_type(module.params['deployment_configuration'],
- DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
- This ensures all keys within the root element are casted and valid integers
+ This ensures all keys within the root element are casted and valid integers
"""
if complex_type is None:
@@ -96,22 +94,16 @@ def map_complex_type(complex_type, type_map):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key][0])
+ new_type[key] = map_complex_type(complex_type[key], type_map[key][0])
else:
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key])
+ new_type[key] = map_complex_type(complex_type[key], type_map[key])
else:
new_type[key] = complex_type[key]
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
- new_type.append(map_complex_type(
- complex_type[i],
- type_map))
+ new_type.append(map_complex_type(complex_type[i], type_map))
elif type_map:
- return globals()['__builtins__'][type_map](complex_type)
+ return globals()["__builtins__"][type_map](complex_type)
return new_type
@@ -133,7 +125,10 @@ def scrub_none_parameters(parameters, descend_into_lists=True):
if isinstance(v, dict):
clean_parameters[k] = scrub_none_parameters(v, descend_into_lists=descend_into_lists)
elif descend_into_lists and isinstance(v, list):
- clean_parameters[k] = [scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv for vv in v]
+ clean_parameters[k] = [
+ scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv
+ for vv in v
+ ]
elif v is not None:
clean_parameters[k] = v
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
deleted file mode 100644
index 8011a1be9..000000000
--- a/ansible_collections/amazon/aws/plugins/module_utils/urls.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import datetime
-import hashlib
-import hmac
-import operator
-
-try:
- from boto3 import session
-except ImportError:
- pass
-
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url
-
-from .ec2 import HAS_BOTO3
-from .ec2 import get_aws_connection_info
-
-import ansible.module_utils.common.warnings as ansible_warnings
-
-
-def hexdigest(s):
- """
- Returns the sha256 hexdigest of a string after encoding.
- """
-
- ansible_warnings.deprecate(
- 'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- return hashlib.sha256(s.encode("utf-8")).hexdigest()
-
-
-def format_querystring(params=None):
- """
- Returns properly url-encoded query string from the provided params dict.
-
- It's specially sorted for cannonical requests
- """
-
- ansible_warnings.deprecate(
- 'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- if not params:
- return ""
-
- # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
- return urlencode(sorted(params.items(), operator.itemgetter(0)))
-
-
-# Key derivation functions. See:
-# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
-def sign(key, msg):
- '''
- Return digest for key applied to msg
- '''
-
- ansible_warnings.deprecate(
- 'amazon.aws.module_utils.urls.sign is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
-
-
-def get_signature_key(key, dateStamp, regionName, serviceName):
- '''
- Returns signature key for AWS resource
- '''
-
- ansible_warnings.deprecate(
- 'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
- kRegion = sign(kDate, regionName)
- kService = sign(kRegion, serviceName)
- kSigning = sign(kService, "aws4_request")
- return kSigning
-
-
-def get_aws_credentials_object(module):
- '''
- Returns aws_access_key_id, aws_secret_access_key, session_token for a module.
- '''
-
- ansible_warnings.deprecate(
- 'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- if not HAS_BOTO3:
- module.fail_json("get_aws_credentials_object requires boto3")
-
- dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
- s = session.Session(**boto_params)
-
- return s.get_credentials()
-
-
-# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
-def signed_request(
- module=None,
- method="GET", service=None, host=None, uri=None,
- query=None, body="", headers=None,
- session_in_header=True, session_in_query=False
-):
- """Generate a SigV4 request to an AWS resource for a module
-
- This is used if you wish to authenticate with AWS credentials to a secure endpoint like an elastisearch domain.
-
- Returns :class:`HTTPResponse` object.
-
- Example:
- result = signed_request(
- module=this,
- service="es",
- host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
- )
-
- :kwarg host: endpoint to talk to
- :kwarg service: AWS id of service (like `ec2` or `es`)
- :kwarg module: An AnsibleAWSModule to gather connection info from
-
- :kwarg body: (optional) Payload to send
- :kwarg method: (optional) HTTP verb to use
- :kwarg query: (optional) dict of query params to handle
- :kwarg uri: (optional) Resource path without query parameters
-
- :kwarg session_in_header: (optional) Add the session token to the headers
- :kwarg session_in_query: (optional) Add the session token to the query parameters
-
- :returns: HTTPResponse
- """
-
- module.deprecate(
- 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.',
- version='7.0.0', collection_name='amazon.aws')
-
- if not HAS_BOTO3:
- module.fail_json("A sigv4 signed_request requires boto3")
-
- # "Constants"
-
- t = datetime.datetime.utcnow()
- amz_date = t.strftime("%Y%m%dT%H%M%SZ")
- datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
- algorithm = "AWS4-HMAC-SHA256"
-
- # AWS stuff
-
- region, dummy, dummy = get_aws_connection_info(module, boto3=True)
- credentials = get_aws_credentials_object(module)
- access_key = credentials.access_key
- secret_key = credentials.secret_key
- session_token = credentials.token
-
- if not access_key:
- module.fail_json(msg="aws_access_key_id is missing")
- if not secret_key:
- module.fail_json(msg="aws_secret_access_key is missing")
-
- credential_scope = "/".join([datestamp, region, service, "aws4_request"])
-
- # Argument Defaults
-
- uri = uri or "/"
- query_string = format_querystring(query) if query else ""
-
- headers = headers or dict()
- query = query or dict()
-
- headers.update({
- "host": host,
- "x-amz-date": amz_date,
- })
-
- # Handle adding of session_token if present
- if session_token:
- if session_in_header:
- headers["X-Amz-Security-Token"] = session_token
- if session_in_query:
- query["X-Amz-Security-Token"] = session_token
-
- if method == "GET":
- body = ""
-
- # Derived data
-
- body_hash = hexdigest(body)
- signed_headers = ";".join(sorted(headers.keys()))
-
- # Setup Cannonical request to generate auth token
-
- cannonical_headers = "\n".join([
- key.lower().strip() + ":" + value for key, value in headers.items()
- ]) + "\n" # Note additional trailing newline
-
- cannonical_request = "\n".join([
- method,
- uri,
- query_string,
- cannonical_headers,
- signed_headers,
- body_hash,
- ])
-
- string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)])
-
- # Sign the Cannonical request
-
- signing_key = get_signature_key(secret_key, datestamp, region, service)
- signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
-
- # Make auth header with that info
-
- authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
- algorithm, access_key, credential_scope, signed_headers, signature
- )
-
- # PERFORM THE REQUEST!
-
- url = "https://" + host + uri
-
- if query_string != "":
- url = url + "?" + query_string
-
- final_headers = {
- "x-amz-date": amz_date,
- "Authorization": authorization_header,
- }
-
- final_headers.update(headers)
-
- return open_url(url, method=method, data=body, headers=final_headers)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py
index 8f4ca3638..444bde5d6 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/version.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py
@@ -5,14 +5,6 @@
"""Provide version object to compare version numbers."""
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
-# remove the _version.py file, and replace the following import by
-#
-# from ansible.module_utils.compat.version import LooseVersion
-
-from ._version import LooseVersion # pylint: disable=unused-import
+# This should be directly imported by modules, rather than importing from here.
+# The import is being kept for backwards compatibility.
+from ansible.module_utils.compat.version import LooseVersion # pylint: disable=unused-import
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
index 226dca920..5e1cf1071 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/waf.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Will Thames
#
# This code is part of Ansible, but is an independent component.
@@ -24,14 +26,11 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+
"""
This module adds shared support for Web Application Firewall modules
"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
try:
import botocore
except ImportError:
@@ -39,84 +38,78 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from .ec2 import AWSRetry
+from .retries import AWSRetry
from .waiters import get_waiter
-
MATCH_LOOKUP = {
- 'byte': {
- 'method': 'byte_match_set',
- 'conditionset': 'ByteMatchSet',
- 'conditiontuple': 'ByteMatchTuple',
- 'type': 'ByteMatch'
- },
- 'geo': {
- 'method': 'geo_match_set',
- 'conditionset': 'GeoMatchSet',
- 'conditiontuple': 'GeoMatchConstraint',
- 'type': 'GeoMatch'
+ "byte": {
+ "method": "byte_match_set",
+ "conditionset": "ByteMatchSet",
+ "conditiontuple": "ByteMatchTuple",
+ "type": "ByteMatch",
},
- 'ip': {
- 'method': 'ip_set',
- 'conditionset': 'IPSet',
- 'conditiontuple': 'IPSetDescriptor',
- 'type': 'IPMatch'
+ "geo": {
+ "method": "geo_match_set",
+ "conditionset": "GeoMatchSet",
+ "conditiontuple": "GeoMatchConstraint",
+ "type": "GeoMatch",
},
- 'regex': {
- 'method': 'regex_match_set',
- 'conditionset': 'RegexMatchSet',
- 'conditiontuple': 'RegexMatchTuple',
- 'type': 'RegexMatch'
+ "ip": {"method": "ip_set", "conditionset": "IPSet", "conditiontuple": "IPSetDescriptor", "type": "IPMatch"},
+ "regex": {
+ "method": "regex_match_set",
+ "conditionset": "RegexMatchSet",
+ "conditiontuple": "RegexMatchTuple",
+ "type": "RegexMatch",
},
- 'size': {
- 'method': 'size_constraint_set',
- 'conditionset': 'SizeConstraintSet',
- 'conditiontuple': 'SizeConstraint',
- 'type': 'SizeConstraint'
+ "size": {
+ "method": "size_constraint_set",
+ "conditionset": "SizeConstraintSet",
+ "conditiontuple": "SizeConstraint",
+ "type": "SizeConstraint",
},
- 'sql': {
- 'method': 'sql_injection_match_set',
- 'conditionset': 'SqlInjectionMatchSet',
- 'conditiontuple': 'SqlInjectionMatchTuple',
- 'type': 'SqlInjectionMatch',
+ "sql": {
+ "method": "sql_injection_match_set",
+ "conditionset": "SqlInjectionMatchSet",
+ "conditiontuple": "SqlInjectionMatchTuple",
+ "type": "SqlInjectionMatch",
},
- 'xss': {
- 'method': 'xss_match_set',
- 'conditionset': 'XssMatchSet',
- 'conditiontuple': 'XssMatchTuple',
- 'type': 'XssMatch'
+ "xss": {
+ "method": "xss_match_set",
+ "conditionset": "XssMatchSet",
+ "conditiontuple": "XssMatchTuple",
+ "type": "XssMatch",
},
}
@AWSRetry.jittered_backoff(delay=5)
def get_rule_with_backoff(client, rule_id):
- return client.get_rule(RuleId=rule_id)['Rule']
+ return client.get_rule(RuleId=rule_id)["Rule"]
@AWSRetry.jittered_backoff(delay=5)
def get_byte_match_set_with_backoff(client, byte_match_set_id):
- return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
+ return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)["ByteMatchSet"]
@AWSRetry.jittered_backoff(delay=5)
def get_ip_set_with_backoff(client, ip_set_id):
- return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
+ return client.get_ip_set(IPSetId=ip_set_id)["IPSet"]
@AWSRetry.jittered_backoff(delay=5)
def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
- return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
+ return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)["SizeConstraintSet"]
@AWSRetry.jittered_backoff(delay=5)
def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
- return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
+ return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)["SqlInjectionMatchSet"]
@AWSRetry.jittered_backoff(delay=5)
def get_xss_match_set_with_backoff(client, xss_match_set_id):
- return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
+ return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)["XssMatchSet"]
def get_rule(client, module, rule_id):
@@ -126,24 +119,24 @@ def get_rule(client, module, rule_id):
module.fail_json_aws(e, msg="Couldn't obtain waf rule")
match_sets = {
- 'ByteMatch': get_byte_match_set_with_backoff,
- 'IPMatch': get_ip_set_with_backoff,
- 'SizeConstraint': get_size_constraint_set_with_backoff,
- 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
- 'XssMatch': get_xss_match_set_with_backoff
+ "ByteMatch": get_byte_match_set_with_backoff,
+ "IPMatch": get_ip_set_with_backoff,
+ "SizeConstraint": get_size_constraint_set_with_backoff,
+ "SqlInjectionMatch": get_sql_injection_match_set_with_backoff,
+ "XssMatch": get_xss_match_set_with_backoff,
}
- if 'Predicates' in rule:
- for predicate in rule['Predicates']:
- if predicate['Type'] in match_sets:
- predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
+ if "Predicates" in rule:
+ for predicate in rule["Predicates"]:
+ if predicate["Type"] in match_sets:
+ predicate.update(match_sets[predicate["Type"]](client, predicate["DataId"]))
# replaced by Id from the relevant MatchSet
- del predicate['DataId']
+ del predicate["DataId"]
return rule
@AWSRetry.jittered_backoff(delay=5)
def get_web_acl_with_backoff(client, web_acl_id):
- return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+ return client.get_web_acl(WebACLId=web_acl_id)["WebACL"]
def get_web_acl(client, module, web_acl_id):
@@ -154,8 +147,8 @@ def get_web_acl(client, module, web_acl_id):
if web_acl:
try:
- for rule in web_acl['Rules']:
- rule.update(get_rule(client, module, rule['RuleId']))
+ for rule in web_acl["Rules"]:
+ rule.update(get_rule(client, module, rule["RuleId"]))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
return camel_dict_to_snake_dict(web_acl)
@@ -163,8 +156,8 @@ def get_web_acl(client, module, web_acl_id):
@AWSRetry.jittered_backoff(delay=5)
def list_rules_with_backoff(client):
- paginator = client.get_paginator('list_rules')
- return paginator.paginate().build_full_result()['Rules']
+ paginator = client.get_paginator("list_rules")
+ return paginator.paginate().build_full_result()["Rules"]
@AWSRetry.jittered_backoff(delay=5)
@@ -172,15 +165,15 @@ def list_regional_rules_with_backoff(client):
resp = client.list_rules()
rules = []
while resp:
- rules += resp['Rules']
- resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ rules += resp["Rules"]
+ resp = client.list_rules(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None
return rules
@AWSRetry.jittered_backoff(delay=5)
def list_web_acls_with_backoff(client):
- paginator = client.get_paginator('list_web_acls')
- return paginator.paginate().build_full_result()['WebACLs']
+ paginator = client.get_paginator("list_web_acls")
+ return paginator.paginate().build_full_result()["WebACLs"]
@AWSRetry.jittered_backoff(delay=5)
@@ -188,16 +181,16 @@ def list_regional_web_acls_with_backoff(client):
resp = client.list_web_acls()
acls = []
while resp:
- acls += resp['WebACLs']
- resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
+ acls += resp["WebACLs"]
+ resp = client.list_web_acls(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None
return acls
def list_web_acls(client, module):
try:
- if client.__class__.__name__ == 'WAF':
+ if client.__class__.__name__ == "WAF":
return list_web_acls_with_backoff(client)
- elif client.__class__.__name__ == 'WAFRegional':
+ elif client.__class__.__name__ == "WAFRegional":
return list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acls")
@@ -206,19 +199,18 @@ def list_web_acls(client, module):
def get_change_token(client, module):
try:
token = client.get_change_token()
- return token['ChangeToken']
+ return token["ChangeToken"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain change token")
-@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException'])
+@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=["WAFStaleDataException"])
def run_func_with_change_token_backoff(client, module, params, func, wait=False):
- params['ChangeToken'] = get_change_token(client, module)
+ params["ChangeToken"] = get_change_token(client, module)
result = func(**params)
if wait:
get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=result['ChangeToken']
- )
+ client,
+ "change_token_in_sync",
+ ).wait(ChangeToken=result["ChangeToken"])
return result
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
index 2abf390cb..51d6b4568 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py
@@ -1,9 +1,8 @@
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import copy
try:
@@ -11,8 +10,7 @@ try:
except ImportError:
pass # caught by HAS_BOTO3
-from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper
-
+from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper
ec2_data = {
"version": 2,
@@ -22,37 +20,19 @@ ec2_data = {
"maxAttempts": 80,
"delay": 15,
"acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "argument": "Images[].State",
- "expected": "available"
- },
- {
- "state": "failure",
- "matcher": "pathAny",
- "argument": "Images[].State",
- "expected": "failed"
- }
- ]
+ {"state": "success", "matcher": "pathAll", "argument": "Images[].State", "expected": "available"},
+ {"matcher": "error", "expected": "InvalidAMIID.NotFound", "state": "retry"},
+ {"state": "failure", "matcher": "pathAny", "argument": "Images[].State", "expected": "failed"},
+ ],
},
"InternetGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeInternetGateways",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(InternetGateways) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidInternetGatewayID.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(InternetGateways) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"},
+ ],
},
"InternetGatewayAttached": {
"operation": "DescribeInternetGateways",
@@ -63,14 +43,10 @@ ec2_data = {
"expected": "available",
"matcher": "pathAll",
"state": "success",
- "argument": "InternetGateways[].Attachments[].State"
+ "argument": "InternetGateways[].Attachments[].State",
},
- {
- "matcher": "error",
- "expected": "InvalidInternetGatewayID.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"},
+ ],
},
"NetworkInterfaceAttached": {
"operation": "DescribeNetworkInterfaces",
@@ -81,14 +57,10 @@ ec2_data = {
"expected": "attached",
"matcher": "pathAll",
"state": "success",
- "argument": "NetworkInterfaces[].Attachment.Status"
+ "argument": "NetworkInterfaces[].Attachment.Status",
},
- {
- "expected": "InvalidNetworkInterfaceID.NotFound",
- "matcher": "error",
- "state": "failure"
- },
- ]
+ {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"},
+ ],
},
"NetworkInterfaceAvailable": {
"operation": "DescribeNetworkInterfaces",
@@ -99,14 +71,10 @@ ec2_data = {
"expected": "available",
"matcher": "pathAll",
"state": "success",
- "argument": "NetworkInterfaces[].Status"
+ "argument": "NetworkInterfaces[].Status",
},
- {
- "expected": "InvalidNetworkInterfaceID.NotFound",
- "matcher": "error",
- "state": "retry"
- },
- ]
+ {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "retry"},
+ ],
},
"NetworkInterfaceDeleted": {
"operation": "DescribeNetworkInterfaces",
@@ -117,20 +85,16 @@ ec2_data = {
"matcher": "path",
"expected": True,
"argument": "length(NetworkInterfaces[]) > `0`",
- "state": "retry"
+ "state": "retry",
},
{
"matcher": "path",
"expected": True,
"argument": "length(NetworkInterfaces[]) == `0`",
- "state": "success"
- },
- {
- "expected": "InvalidNetworkInterfaceID.NotFound",
- "matcher": "error",
- "state": "success"
+ "state": "success",
},
- ]
+ {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "success"},
+ ],
},
"NetworkInterfaceDeleteOnTerminate": {
"operation": "DescribeNetworkInterfaces",
@@ -141,14 +105,10 @@ ec2_data = {
"expected": True,
"matcher": "pathAll",
"state": "success",
- "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination",
},
- {
- "expected": "InvalidNetworkInterfaceID.NotFound",
- "matcher": "error",
- "state": "failure"
- },
- ]
+ {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"},
+ ],
},
"NetworkInterfaceNoDeleteOnTerminate": {
"operation": "DescribeNetworkInterfaces",
@@ -159,94 +119,53 @@ ec2_data = {
"expected": False,
"matcher": "pathAll",
"state": "success",
- "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination"
+ "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination",
},
- {
- "expected": "InvalidNetworkInterfaceID.NotFound",
- "matcher": "error",
- "state": "failure"
- },
- ]
+ {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"},
+ ],
},
"RouteTableExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeRouteTables",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(RouteTables[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidRouteTableID.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(RouteTables[]) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidRouteTableID.NotFound", "state": "retry"},
+ ],
},
"SecurityGroupExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSecurityGroups",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(SecurityGroups[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidGroup.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(SecurityGroups[]) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidGroup.NotFound", "state": "retry"},
+ ],
},
"SnapshotCompleted": {
"delay": 15,
"operation": "DescribeSnapshots",
"maxAttempts": 40,
"acceptors": [
- {
- "expected": "completed",
- "matcher": "pathAll",
- "state": "success",
- "argument": "Snapshots[].State"
- }
- ]
+ {"expected": "completed", "matcher": "pathAll", "state": "success", "argument": "Snapshots[].State"}
+ ],
},
"SubnetAvailable": {
"delay": 15,
"operation": "DescribeSubnets",
"maxAttempts": 40,
"acceptors": [
- {
- "expected": "available",
- "matcher": "pathAll",
- "state": "success",
- "argument": "Subnets[].State"
- }
- ]
+ {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Subnets[].State"}
+ ],
},
"SubnetExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "retry"},
+ ],
},
"SubnetHasMapPublic": {
"delay": 5,
@@ -257,9 +176,9 @@ ec2_data = {
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
+ "state": "success",
},
- ]
+ ],
},
"SubnetNoMapPublic": {
"delay": 5,
@@ -270,9 +189,9 @@ ec2_data = {
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
+ "state": "success",
},
- ]
+ ],
},
"SubnetHasAssignIpv6": {
"delay": 5,
@@ -283,9 +202,9 @@ ec2_data = {
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
+ "state": "success",
},
- ]
+ ],
},
"SubnetNoAssignIpv6": {
"delay": 5,
@@ -296,93 +215,53 @@ ec2_data = {
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
+ "state": "success",
},
- ]
+ ],
},
"SubnetDeleted": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "retry"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "success"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "retry"},
+ {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "success"},
+ ],
},
"VpcAvailable": {
"delay": 15,
"operation": "DescribeVpcs",
"maxAttempts": 40,
"acceptors": [
- {
- "expected": "available",
- "matcher": "pathAll",
- "state": "success",
- "argument": "Vpcs[].State"
- }
- ]
+ {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Vpcs[].State"}
+ ],
},
"VpcExists": {
"operation": "DescribeVpcs",
"delay": 1,
"maxAttempts": 5,
"acceptors": [
- {
- "matcher": "status",
- "expected": 200,
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidVpcID.NotFound",
- "state": "retry"
- }
- ]
+ {"matcher": "status", "expected": 200, "state": "success"},
+ {"matcher": "error", "expected": "InvalidVpcID.NotFound", "state": "retry"},
+ ],
},
"VpcEndpointExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpcEndpoints",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(VpcEndpoints[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidVpcEndpointId.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(VpcEndpoints[]) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidVpcEndpointId.NotFound", "state": "retry"},
+ ],
},
"VpnGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpnGateways",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(VpnGateways[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidVpnGatewayID.NotFound",
- "state": "retry"
- },
- ]
+ {"matcher": "path", "expected": True, "argument": "length(VpnGateways[]) > `0`", "state": "success"},
+ {"matcher": "error", "expected": "InvalidVpnGatewayID.NotFound", "state": "retry"},
+ ],
},
"VpnGatewayDetached": {
"delay": 5,
@@ -393,47 +272,29 @@ ec2_data = {
"matcher": "path",
"expected": True,
"argument": "VpnGateways[0].State == 'available'",
- "state": "success"
+ "state": "success",
},
- ]
+ ],
},
"NatGatewayDeleted": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeNatGateways",
"acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "expected": "deleted",
- "argument": "NatGateways[].State"
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "NatGatewayNotFound"
- }
- ]
+ {"state": "success", "matcher": "pathAll", "expected": "deleted", "argument": "NatGateways[].State"},
+ {"state": "success", "matcher": "error", "expected": "NatGatewayNotFound"},
+ ],
},
"NatGatewayAvailable": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeNatGateways",
"acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "expected": "available",
- "argument": "NatGateways[].State"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "NatGatewayNotFound"
- }
- ]
+ {"state": "success", "matcher": "pathAll", "expected": "available", "argument": "NatGateways[].State"},
+ {"state": "retry", "matcher": "error", "expected": "NatGatewayNotFound"},
+ ],
},
- }
+ },
}
@@ -445,20 +306,11 @@ waf_data = {
"maxAttempts": 60,
"operation": "GetChangeTokenStatus",
"acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "ChangeTokenStatus == 'INSYNC'",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "WAFInternalErrorException",
- "state": "retry"
- }
- ]
+ {"matcher": "path", "expected": True, "argument": "ChangeTokenStatus == 'INSYNC'", "state": "success"},
+ {"matcher": "error", "expected": "WAFInternalErrorException", "state": "retry"},
+ ],
}
- }
+ },
}
eks_data = {
@@ -469,54 +321,27 @@ eks_data = {
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
- {
- "state": "success",
- "matcher": "path",
- "argument": "cluster.status",
- "expected": "ACTIVE"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
+ {"state": "success", "matcher": "path", "argument": "cluster.status", "expected": "ACTIVE"},
+ {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
},
"ClusterDeleted": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
- {
- "state": "retry",
- "matcher": "path",
- "argument": "cluster.status != 'DELETED'",
- "expected": True
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
+ {"state": "retry", "matcher": "path", "argument": "cluster.status != 'DELETED'", "expected": True},
+ {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
},
"FargateProfileActive": {
"delay": 20,
"maxAttempts": 30,
"operation": "DescribeFargateProfile",
"acceptors": [
- {
- "state": "success",
- "matcher": "path",
- "argument": "fargateProfile.status",
- "expected": "ACTIVE"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
+ {"state": "success", "matcher": "path", "argument": "fargateProfile.status", "expected": "ACTIVE"},
+ {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
},
"FargateProfileDeleted": {
"delay": 20,
@@ -527,52 +352,30 @@ eks_data = {
"state": "retry",
"matcher": "path",
"argument": "fargateProfile.status == 'DELETING'",
- "expected": True
+ "expected": True,
},
- {
- "state": "success",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
+ {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
},
"NodegroupActive": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeNodegroup",
"acceptors": [
- {
- "state": "success",
- "matcher": "path",
- "argument": "nodegroup.status",
- "expected": "ACTIVE"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
+ {"state": "success", "matcher": "path", "argument": "nodegroup.status", "expected": "ACTIVE"},
+ {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
},
"NodegroupDeleted": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeNodegroup",
"acceptors": [
- {
- "state": "retry",
- "matcher": "path",
- "argument": "nodegroup.status == 'DELETING'",
- "expected": True
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
- }
- }
+ {"state": "retry", "matcher": "path", "argument": "nodegroup.status == 'DELETING'", "expected": True},
+ {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"},
+ ],
+ },
+ },
}
@@ -585,12 +388,12 @@ elb_data = {
"argument": "InstanceStates[].State",
"expected": "InService",
"matcher": "pathAny",
- "state": "success"
+ "state": "success",
}
],
"delay": 15,
"maxAttempts": 40,
- "operation": "DescribeInstanceHealth"
+ "operation": "DescribeInstanceHealth",
},
"InstanceDeregistered": {
"delay": 15,
@@ -601,14 +404,10 @@ elb_data = {
"expected": "OutOfService",
"matcher": "pathAll",
"state": "success",
- "argument": "InstanceStates[].State"
+ "argument": "InstanceStates[].State",
},
- {
- "matcher": "error",
- "expected": "InvalidInstance",
- "state": "success"
- }
- ]
+ {"matcher": "error", "expected": "InvalidInstance", "state": "success"},
+ ],
},
"InstanceInService": {
"acceptors": [
@@ -616,17 +415,13 @@ elb_data = {
"argument": "InstanceStates[].State",
"expected": "InService",
"matcher": "pathAll",
- "state": "success"
+ "state": "success",
},
- {
- "matcher": "error",
- "expected": "InvalidInstance",
- "state": "retry"
- }
+ {"matcher": "error", "expected": "InvalidInstance", "state": "retry"},
],
"delay": 15,
"maxAttempts": 40,
- "operation": "DescribeInstanceHealth"
+ "operation": "DescribeInstanceHealth",
},
"LoadBalancerCreated": {
"delay": 10,
@@ -664,7 +459,7 @@ elb_data = {
},
],
},
- }
+ },
}
elbv2_data = {
@@ -679,20 +474,16 @@ elbv2_data = {
"state": "success",
"matcher": "pathAll",
"argument": "LoadBalancers[].State.Code",
- "expected": "active"
+ "expected": "active",
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "LoadBalancers[].State.Code",
- "expected": "provisioning"
+ "expected": "provisioning",
},
- {
- "state": "retry",
- "matcher": "error",
- "expected": "LoadBalancerNotFound"
- }
- ]
+ {"state": "retry", "matcher": "error", "expected": "LoadBalancerNotFound"},
+ ],
},
"LoadBalancerIpAddressTypeIpv4": {
"delay": 15,
@@ -703,20 +494,16 @@ elbv2_data = {
"state": "success",
"matcher": "pathAll",
"argument": "LoadBalancers[].IpAddressType",
- "expected": "ipv4"
+ "expected": "ipv4",
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "LoadBalancers[].IpAddressType",
- "expected": "dualstack"
+ "expected": "dualstack",
},
- {
- "state": "failure",
- "matcher": "error",
- "expected": "LoadBalancerNotFound"
- }
- ]
+ {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"},
+ ],
},
"LoadBalancerIpAddressTypeDualStack": {
"delay": 15,
@@ -727,20 +514,16 @@ elbv2_data = {
"state": "success",
"matcher": "pathAll",
"argument": "LoadBalancers[].IpAddressType",
- "expected": "dualstack"
+ "expected": "dualstack",
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "LoadBalancers[].IpAddressType",
- "expected": "ipv4"
+ "expected": "ipv4",
},
- {
- "state": "failure",
- "matcher": "error",
- "expected": "LoadBalancerNotFound"
- }
- ]
+ {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"},
+ ],
},
"LoadBalancersDeleted": {
"delay": 15,
@@ -751,22 +534,31 @@ elbv2_data = {
"state": "retry",
"matcher": "pathAll",
"argument": "LoadBalancers[].State.Code",
- "expected": "active"
+ "expected": "active",
},
- {
- "matcher": "error",
- "expected": "LoadBalancerNotFound",
- "state": "success"
- }
- ]
+ {"matcher": "error", "expected": "LoadBalancerNotFound", "state": "success"},
+ ],
},
- }
+ },
}
rds_data = {
"version": 2,
"waiters": {
+ "DBClusterPromoting": {
+ "delay": 5,
+ "maxAttempts": 60,
+ "operation": "DescribeDBClusters",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBClusters[].Status",
+ "expected": "promoting",
+ },
+ ],
+ },
"DBInstanceStopped": {
"delay": 20,
"maxAttempts": 60,
@@ -776,45 +568,27 @@ rds_data = {
"state": "success",
"matcher": "pathAll",
"argument": "DBInstances[].DBInstanceStatus",
- "expected": "stopped"
+ "expected": "stopped",
},
- ]
+ ],
},
"DBClusterAvailable": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeDBClusters",
"acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "argument": "DBClusters[].Status",
- "expected": "available"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "DBClusterNotFoundFault"
- }
- ]
+ {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "available"},
+ {"state": "retry", "matcher": "error", "expected": "DBClusterNotFoundFault"},
+ ],
},
"DBClusterDeleted": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeDBClusters",
"acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "argument": "DBClusters[].Status",
- "expected": "stopped"
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "DBClusterNotFoundFault"
- }
- ]
+ {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "stopped"},
+ {"state": "success", "matcher": "error", "expected": "DBClusterNotFoundFault"},
+ ],
},
"ReadReplicaPromoted": {
"delay": 5,
@@ -825,15 +599,15 @@ rds_data = {
"state": "success",
"matcher": "path",
"argument": "length(DBInstances[].StatusInfos) == `0`",
- "expected": True
+ "expected": True,
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "DBInstances[].StatusInfos[].Status",
- "expected": "replicating"
- }
- ]
+ "expected": "replicating",
+ },
+ ],
},
"RoleAssociated": {
"delay": 5,
@@ -844,15 +618,15 @@ rds_data = {
"state": "success",
"matcher": "pathAll",
"argument": "DBInstances[].AssociatedRoles[].Status",
- "expected": "ACTIVE"
+ "expected": "ACTIVE",
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "DBInstances[].AssociatedRoles[].Status",
- "expected": "PENDING"
- }
- ]
+ "expected": "PENDING",
+ },
+ ],
},
"RoleDisassociated": {
"delay": 5,
@@ -863,23 +637,23 @@ rds_data = {
"state": "success",
"matcher": "pathAll",
"argument": "DBInstances[].AssociatedRoles[].Status",
- "expected": "ACTIVE"
+ "expected": "ACTIVE",
},
{
"state": "retry",
"matcher": "pathAny",
"argument": "DBInstances[].AssociatedRoles[].Status",
- "expected": "PENDING"
+ "expected": "PENDING",
},
{
"state": "success",
"matcher": "path",
"argument": "length(DBInstances[].AssociatedRoles[]) == `0`",
- "expected": True
+ "expected": True,
},
- ]
- }
- }
+ ],
+ },
+ },
}
@@ -891,24 +665,23 @@ route53_data = {
"maxAttempts": 60,
"operation": "GetChange",
"acceptors": [
- {
- "matcher": "path",
- "expected": "INSYNC",
- "argument": "ChangeInfo.Status",
- "state": "success"
- }
- ]
+ {"matcher": "path", "expected": "INSYNC", "argument": "ChangeInfo.Status", "state": "success"}
+ ],
}
- }
+ },
}
def _inject_limit_retries(model):
-
extra_retries = [
- 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
- 'InternalFailure', 'InternalError', 'TooManyRequestsException',
- 'Throttling']
+ "RequestLimitExceeded",
+ "Unavailable",
+ "ServiceUnavailable",
+ "InternalFailure",
+ "InternalError",
+ "TooManyRequestsException",
+ "Throttling",
+ ]
acceptors = []
for error in extra_retries:
@@ -958,308 +731,246 @@ def route53_model(name):
waiters_by_name = {
- ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter(
- 'image_available',
- ec2_model('ImageAvailable'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_images
- )),
- ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'internet_gateway_exists',
- ec2_model('InternetGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_internet_gateways
- )),
- ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter(
- 'internet_gateway_attached',
- ec2_model('InternetGatewayAttached'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_internet_gateways
- )),
- ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter(
- 'network_interface_attached',
- ec2_model('NetworkInterfaceAttached'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_network_interfaces
- )),
- ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter(
- 'network_interface_deleted',
- ec2_model('NetworkInterfaceDeleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_network_interfaces
- )),
- ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter(
- 'network_interface_available',
- ec2_model('NetworkInterfaceAvailable'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_network_interfaces
- )),
- ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
- 'network_interface_delete_on_terminate',
- ec2_model('NetworkInterfaceDeleteOnTerminate'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_network_interfaces
- )),
- ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter(
- 'network_interface_no_delete_on_terminate',
- ec2_model('NetworkInterfaceNoDeleteOnTerminate'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_network_interfaces
- )),
- ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
- 'route_table_exists',
- ec2_model('RouteTableExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_route_tables
- )),
- ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
- 'security_group_exists',
- ec2_model('SecurityGroupExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_security_groups
- )),
- ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter(
- 'snapshot_completed',
- ec2_model('SnapshotCompleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_snapshots
- )),
- ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter(
- 'subnet_available',
- ec2_model('SubnetAvailable'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
- 'subnet_exists',
- ec2_model('SubnetExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_map_public',
- ec2_model('SubnetHasMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_map_public',
- ec2_model('SubnetNoMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_assign_ipv6',
- ec2_model('SubnetHasAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_assign_ipv6',
- ec2_model('SubnetNoAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
- 'subnet_deleted',
- ec2_model('SubnetDeleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter(
- 'vpc_available',
- ec2_model('VpcAvailable'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpcs
- )),
- ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter(
- 'vpc_exists',
- ec2_model('VpcExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpcs
- )),
- ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter(
- 'vpc_endpoint_exists',
- ec2_model('VpcEndpointExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpc_endpoints
- )),
- ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_exists',
- ec2_model('VpnGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_detached',
- ec2_model('VpnGatewayDetached'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('EC2', 'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter(
- 'nat_gateway_deleted',
- ec2_model('NatGatewayDeleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_nat_gateways
- )),
- ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter(
- 'nat_gateway_available',
- ec2_model('NatGatewayAvailable'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_nat_gateways
- )),
- ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
- 'cluster_active',
- eks_model('ClusterActive'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
- 'cluster_deleted',
- eks_model('ClusterDeleted'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter(
- 'fargate_profile_active',
- eks_model('FargateProfileActive'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_fargate_profile
- )),
- ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter(
- 'fargate_profile_deleted',
- eks_model('FargateProfileDeleted'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_fargate_profile
- )),
- ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter(
- 'nodegroup_active',
- eks_model('NodegroupActive'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_nodegroup
- )),
- ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter(
- 'nodegroup_deleted',
- eks_model('NodegroupDeleted'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_nodegroup
- )),
- ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter(
- 'any_instance_in_service',
- elb_model('AnyInstanceInService'),
- core_waiter.NormalizedOperationMethod(
- elb.describe_instance_health
- )),
- ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter(
- 'instance_deregistered',
- elb_model('InstanceDeregistered'),
- core_waiter.NormalizedOperationMethod(
- elb.describe_instance_health
- )),
- ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter(
- 'load_balancer_created',
- elb_model('InstanceInService'),
- core_waiter.NormalizedOperationMethod(
- elb.describe_instance_health
- )),
- ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter(
- 'load_balancer_created',
- elb_model('LoadBalancerCreated'),
- core_waiter.NormalizedOperationMethod(
- elb.describe_load_balancers
- )),
- ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter(
- 'load_balancer_deleted',
- elb_model('LoadBalancerDeleted'),
- core_waiter.NormalizedOperationMethod(
- elb.describe_load_balancers
- )),
- ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter(
- 'load_balancer_available',
- elbv2_model('LoadBalancerAvailable'),
- core_waiter.NormalizedOperationMethod(
- elbv2.describe_load_balancers
- )),
- ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter(
- 'load_balancer_ip_address_type_ipv4',
- elbv2_model('LoadBalancerIpAddressTypeIpv4'),
- core_waiter.NormalizedOperationMethod(
- elbv2.describe_load_balancers
- )),
- ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter(
- 'load_balancers_ip_address_type_dualstack',
- elbv2_model('LoadBalancerIpAddressTypeDualStack'),
- core_waiter.NormalizedOperationMethod(
- elbv2.describe_load_balancers
- )),
- ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter(
- 'load_balancers_deleted',
- elbv2_model('LoadBalancersDeleted'),
- core_waiter.NormalizedOperationMethod(
- elbv2.describe_load_balancers
- )),
- ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
- 'db_instance_stopped',
- rds_model('DBInstanceStopped'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
- ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter(
- 'cluster_available',
- rds_model('DBClusterAvailable'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_clusters
- )),
- ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter(
- 'cluster_deleted',
- rds_model('DBClusterDeleted'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_clusters
- )),
- ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter(
- 'read_replica_promoted',
- rds_model('ReadReplicaPromoted'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
- ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter(
- 'role_associated',
- rds_model('RoleAssociated'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
- ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter(
- 'role_disassociated',
- rds_model('RoleDisassociated'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
- ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter(
- 'resource_record_sets_changed',
- route53_model('ResourceRecordSetsChanged'),
- core_waiter.NormalizedOperationMethod(
- route53.get_change
- )),
+ ("EC2", "image_available"): lambda ec2: core_waiter.Waiter(
+ "image_available", ec2_model("ImageAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_images)
+ ),
+ ("EC2", "internet_gateway_exists"): lambda ec2: core_waiter.Waiter(
+ "internet_gateway_exists",
+ ec2_model("InternetGatewayExists"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways),
+ ),
+ ("EC2", "internet_gateway_attached"): lambda ec2: core_waiter.Waiter(
+ "internet_gateway_attached",
+ ec2_model("InternetGatewayAttached"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways),
+ ),
+ ("EC2", "network_interface_attached"): lambda ec2: core_waiter.Waiter(
+ "network_interface_attached",
+ ec2_model("NetworkInterfaceAttached"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces),
+ ),
+ ("EC2", "network_interface_deleted"): lambda ec2: core_waiter.Waiter(
+ "network_interface_deleted",
+ ec2_model("NetworkInterfaceDeleted"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces),
+ ),
+ ("EC2", "network_interface_available"): lambda ec2: core_waiter.Waiter(
+ "network_interface_available",
+ ec2_model("NetworkInterfaceAvailable"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces),
+ ),
+ ("EC2", "network_interface_delete_on_terminate"): lambda ec2: core_waiter.Waiter(
+ "network_interface_delete_on_terminate",
+ ec2_model("NetworkInterfaceDeleteOnTerminate"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces),
+ ),
+ ("EC2", "network_interface_no_delete_on_terminate"): lambda ec2: core_waiter.Waiter(
+ "network_interface_no_delete_on_terminate",
+ ec2_model("NetworkInterfaceNoDeleteOnTerminate"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces),
+ ),
+ ("EC2", "route_table_exists"): lambda ec2: core_waiter.Waiter(
+ "route_table_exists",
+ ec2_model("RouteTableExists"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_route_tables),
+ ),
+ ("EC2", "security_group_exists"): lambda ec2: core_waiter.Waiter(
+ "security_group_exists",
+ ec2_model("SecurityGroupExists"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_security_groups),
+ ),
+ ("EC2", "snapshot_completed"): lambda ec2: core_waiter.Waiter(
+ "snapshot_completed",
+ ec2_model("SnapshotCompleted"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_snapshots),
+ ),
+ ("EC2", "subnet_available"): lambda ec2: core_waiter.Waiter(
+ "subnet_available", ec2_model("SubnetAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets)
+ ),
+ ("EC2", "subnet_exists"): lambda ec2: core_waiter.Waiter(
+ "subnet_exists", ec2_model("SubnetExists"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets)
+ ),
+ ("EC2", "subnet_has_map_public"): lambda ec2: core_waiter.Waiter(
+ "subnet_has_map_public",
+ ec2_model("SubnetHasMapPublic"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_subnets),
+ ),
+ ("EC2", "subnet_no_map_public"): lambda ec2: core_waiter.Waiter(
+ "subnet_no_map_public",
+ ec2_model("SubnetNoMapPublic"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_subnets),
+ ),
+ ("EC2", "subnet_has_assign_ipv6"): lambda ec2: core_waiter.Waiter(
+ "subnet_has_assign_ipv6",
+ ec2_model("SubnetHasAssignIpv6"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_subnets),
+ ),
+ ("EC2", "subnet_no_assign_ipv6"): lambda ec2: core_waiter.Waiter(
+ "subnet_no_assign_ipv6",
+ ec2_model("SubnetNoAssignIpv6"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_subnets),
+ ),
+ ("EC2", "subnet_deleted"): lambda ec2: core_waiter.Waiter(
+ "subnet_deleted", ec2_model("SubnetDeleted"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets)
+ ),
+ ("EC2", "vpc_available"): lambda ec2: core_waiter.Waiter(
+ "vpc_available", ec2_model("VpcAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs)
+ ),
+ ("EC2", "vpc_exists"): lambda ec2: core_waiter.Waiter(
+ "vpc_exists", ec2_model("VpcExists"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs)
+ ),
+ ("EC2", "vpc_endpoint_exists"): lambda ec2: core_waiter.Waiter(
+ "vpc_endpoint_exists",
+ ec2_model("VpcEndpointExists"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_vpc_endpoints),
+ ),
+ ("EC2", "vpn_gateway_exists"): lambda ec2: core_waiter.Waiter(
+ "vpn_gateway_exists",
+ ec2_model("VpnGatewayExists"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways),
+ ),
+ ("EC2", "vpn_gateway_detached"): lambda ec2: core_waiter.Waiter(
+ "vpn_gateway_detached",
+ ec2_model("VpnGatewayDetached"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways),
+ ),
+ ("EC2", "nat_gateway_deleted"): lambda ec2: core_waiter.Waiter(
+ "nat_gateway_deleted",
+ ec2_model("NatGatewayDeleted"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways),
+ ),
+ ("EC2", "nat_gateway_available"): lambda ec2: core_waiter.Waiter(
+ "nat_gateway_available",
+ ec2_model("NatGatewayAvailable"),
+ core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways),
+ ),
+ ("WAF", "change_token_in_sync"): lambda waf: core_waiter.Waiter(
+ "change_token_in_sync",
+ waf_model("ChangeTokenInSync"),
+ core_waiter.NormalizedOperationMethod(waf.get_change_token_status),
+ ),
+ ("WAFRegional", "change_token_in_sync"): lambda waf: core_waiter.Waiter(
+ "change_token_in_sync",
+ waf_model("ChangeTokenInSync"),
+ core_waiter.NormalizedOperationMethod(waf.get_change_token_status),
+ ),
+ ("EKS", "cluster_active"): lambda eks: core_waiter.Waiter(
+ "cluster_active", eks_model("ClusterActive"), core_waiter.NormalizedOperationMethod(eks.describe_cluster)
+ ),
+ ("EKS", "cluster_deleted"): lambda eks: core_waiter.Waiter(
+ "cluster_deleted", eks_model("ClusterDeleted"), core_waiter.NormalizedOperationMethod(eks.describe_cluster)
+ ),
+ ("EKS", "fargate_profile_active"): lambda eks: core_waiter.Waiter(
+ "fargate_profile_active",
+ eks_model("FargateProfileActive"),
+ core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile),
+ ),
+ ("EKS", "fargate_profile_deleted"): lambda eks: core_waiter.Waiter(
+ "fargate_profile_deleted",
+ eks_model("FargateProfileDeleted"),
+ core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile),
+ ),
+ ("EKS", "nodegroup_active"): lambda eks: core_waiter.Waiter(
+ "nodegroup_active", eks_model("NodegroupActive"), core_waiter.NormalizedOperationMethod(eks.describe_nodegroup)
+ ),
+ ("EKS", "nodegroup_deleted"): lambda eks: core_waiter.Waiter(
+ "nodegroup_deleted",
+ eks_model("NodegroupDeleted"),
+ core_waiter.NormalizedOperationMethod(eks.describe_nodegroup),
+ ),
+ ("ElasticLoadBalancing", "any_instance_in_service"): lambda elb: core_waiter.Waiter(
+ "any_instance_in_service",
+ elb_model("AnyInstanceInService"),
+ core_waiter.NormalizedOperationMethod(elb.describe_instance_health),
+ ),
+ ("ElasticLoadBalancing", "instance_deregistered"): lambda elb: core_waiter.Waiter(
+ "instance_deregistered",
+ elb_model("InstanceDeregistered"),
+ core_waiter.NormalizedOperationMethod(elb.describe_instance_health),
+ ),
+ ("ElasticLoadBalancing", "instance_in_service"): lambda elb: core_waiter.Waiter(
+ "load_balancer_created",
+ elb_model("InstanceInService"),
+ core_waiter.NormalizedOperationMethod(elb.describe_instance_health),
+ ),
+ ("ElasticLoadBalancing", "load_balancer_created"): lambda elb: core_waiter.Waiter(
+ "load_balancer_created",
+ elb_model("LoadBalancerCreated"),
+ core_waiter.NormalizedOperationMethod(elb.describe_load_balancers),
+ ),
+ ("ElasticLoadBalancing", "load_balancer_deleted"): lambda elb: core_waiter.Waiter(
+ "load_balancer_deleted",
+ elb_model("LoadBalancerDeleted"),
+ core_waiter.NormalizedOperationMethod(elb.describe_load_balancers),
+ ),
+ ("ElasticLoadBalancingv2", "load_balancer_available"): lambda elbv2: core_waiter.Waiter(
+ "load_balancer_available",
+ elbv2_model("LoadBalancerAvailable"),
+ core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers),
+ ),
+ ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_ipv4"): lambda elbv2: core_waiter.Waiter(
+ "load_balancer_ip_address_type_ipv4",
+ elbv2_model("LoadBalancerIpAddressTypeIpv4"),
+ core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers),
+ ),
+ ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_dualstack"): lambda elbv2: core_waiter.Waiter(
+ "load_balancers_ip_address_type_dualstack",
+ elbv2_model("LoadBalancerIpAddressTypeDualStack"),
+ core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers),
+ ),
+ ("ElasticLoadBalancingv2", "load_balancers_deleted"): lambda elbv2: core_waiter.Waiter(
+ "load_balancers_deleted",
+ elbv2_model("LoadBalancersDeleted"),
+ core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers),
+ ),
+ ("RDS", "db_cluster_promoting"): lambda rds: core_waiter.Waiter(
+ "db_cluster_promoting",
+ rds_model("DBClusterPromoting"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_clusters),
+ ),
+ ("RDS", "db_instance_stopped"): lambda rds: core_waiter.Waiter(
+ "db_instance_stopped",
+ rds_model("DBInstanceStopped"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_instances),
+ ),
+ ("RDS", "cluster_available"): lambda rds: core_waiter.Waiter(
+ "cluster_available",
+ rds_model("DBClusterAvailable"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_clusters),
+ ),
+ ("RDS", "cluster_deleted"): lambda rds: core_waiter.Waiter(
+ "cluster_deleted",
+ rds_model("DBClusterDeleted"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_clusters),
+ ),
+ ("RDS", "read_replica_promoted"): lambda rds: core_waiter.Waiter(
+ "read_replica_promoted",
+ rds_model("ReadReplicaPromoted"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_instances),
+ ),
+ ("RDS", "role_associated"): lambda rds: core_waiter.Waiter(
+ "role_associated", rds_model("RoleAssociated"), core_waiter.NormalizedOperationMethod(rds.describe_db_instances)
+ ),
+ ("RDS", "role_disassociated"): lambda rds: core_waiter.Waiter(
+ "role_disassociated",
+ rds_model("RoleDisassociated"),
+ core_waiter.NormalizedOperationMethod(rds.describe_db_instances),
+ ),
+ ("Route53", "resource_record_sets_changed"): lambda route53: core_waiter.Waiter(
+ "resource_record_sets_changed",
+ route53_model("ResourceRecordSetsChanged"),
+ core_waiter.NormalizedOperationMethod(route53.get_change),
+ ),
}
def get_waiter(client, waiter_name):
- if isinstance(client, _RetryingBotoClientWrapper):
+ if isinstance(client, RetryingBotoClientWrapper):
return get_waiter(client.client, waiter_name)
try:
return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
except KeyError:
- raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
- waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
+ available_waiters = ", ".join(repr(k) for k in waiters_by_name.keys())
+ raise NotImplementedError(
+ f"Waiter {waiter_name} could not be found for client {type(client)}. Available waiters: {available_waiters}"
+ )