author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-05 16:18:34 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-05 16:18:34 +0000
commit     3667197efb7b18ec842efd504785965911f8ac4b (patch)
tree       0b986a4bc6879d080b100666a97cdabbc9ca1f28 /ansible_collections/amazon/aws/plugins
parent     Adding upstream version 9.5.1+dfsg. (diff)
Adding upstream version 10.0.0+dfsg. (upstream/10.0.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/amazon/aws/plugins')
-rw-r--r--  ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py | 17
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py | 6
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/acm.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/botocore.py | 20
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/common.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/ec2.py | 18
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/elbv2.py | 108
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/iam.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/modules.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/policy.py | 57
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/rds.py | 41
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/s3.py | 4
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py | 29
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudformation.py | 40
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudtrail.py | 13
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami.py | 43
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py | 61
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni.py | 27
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance.py | 260
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py | 116
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py | 163
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py | 8
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vol.py | 18
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py | 3
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py | 27
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py | 16
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_policy.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_role.py | 40
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_role_info.py | 23
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key.py | 79
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key_info.py | 50
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_event.py | 249
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_info.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py | 275
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py | 157
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py | 388
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py (renamed from ansible_collections/amazon/aws/plugins/modules/rds_param_group.py) | 15
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53_health_check.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_bucket.py | 798
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object.py | 162
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object_info.py | 6
48 files changed, 2529 insertions, 900 deletions
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
index 8b9796b7f..bf0bc50b1 100644
--- a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -633,17 +633,17 @@ class InventoryModule(AWSInventoryBase):
"""
instances = []
ids_to_ignore = []
- for filter in exclude_filters:
+ for filter_dict in exclude_filters:
for i in self._get_instances_by_region(
regions,
- ansible_dict_to_boto3_filter_list(filter),
+ ansible_dict_to_boto3_filter_list(filter_dict),
strict_permissions,
):
ids_to_ignore.append(i["InstanceId"])
- for filter in include_filters:
+ for filter_dict in include_filters:
for i in self._get_instances_by_region(
regions,
- ansible_dict_to_boto3_filter_list(filter),
+ ansible_dict_to_boto3_filter_list(filter_dict),
strict_permissions,
):
if i["InstanceId"] not in ids_to_ignore:
@@ -805,8 +805,8 @@ class InventoryModule(AWSInventoryBase):
if self.get_option("include_extra_api_calls"):
self.display.deprecate(
- "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.",
- date="2024-09-01",
+ "The include_extra_api_calls option has been deprecated and will be removed in release 9.0.0.",
+ version="9.0.0",
collection_name="amazon.aws",
)
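
The filter_dict rename above avoids shadowing the filter() builtin; for context, each of these dicts is an Ansible-style filter mapping that must be converted to boto3's Filters format before describe_instances is called. A minimal sketch of the assumed conversion (a hypothetical stand-in for the collection's ansible_dict_to_boto3_filter_list helper, shown only to illustrate the shape of the data):

    def dict_to_boto3_filters(filters):
        # {"instance-state-name": "running"} -> [{"Name": ..., "Values": [...]}]
        return [
            {"Name": name, "Values": values if isinstance(values, list) else [values]}
            for name, values in filters.items()
        ]

    assert dict_to_boto3_filters({"instance-state-name": "running"}) == [
        {"Name": "instance-state-name", "Values": ["running"]}
    ]
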
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
index 35f05c94e..c03f14450 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
@@ -49,7 +49,7 @@ except ImportError:
class LookupModule(LookupBase):
- def lookup_constant(self, name):
+ def lookup_constant(self, name): # pylint: disable=too-many-return-statements
if name == "MINIMUM_BOTOCORE_VERSION":
return botocore_utils.MINIMUM_BOTOCORE_VERSION
if name == "MINIMUM_BOTO3_VERSION":
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
index c01f583f0..d5ced781b 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
@@ -44,13 +44,10 @@ _raw:
import json
+import ansible.module_utils.six.moves.urllib.error
+import ansible.module_utils.urls
from ansible.errors import AnsibleLookupError
from ansible.module_utils._text import to_native
-from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils.six.moves.urllib.error import URLError
-from ansible.module_utils.urls import ConnectionError
-from ansible.module_utils.urls import SSLValidationError
-from ansible.module_utils.urls import open_url
from ansible.plugins.lookup import LookupBase
@@ -64,19 +61,19 @@ class LookupModule(LookupBase):
ip_prefix_label = "ip_prefix"
try:
- resp = open_url("https://ip-ranges.amazonaws.com/ip-ranges.json")
+ resp = ansible.module_utils.urls.open_url("https://ip-ranges.amazonaws.com/ip-ranges.json")
amazon_response = json.load(resp)[prefixes_label]
except getattr(json.decoder, "JSONDecodeError", ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleLookupError(f"Could not decode AWS IP ranges: {to_native(e)}")
- except HTTPError as e:
+ except ansible.module_utils.six.moves.urllib.error.HTTPError as e:
raise AnsibleLookupError(f"Received HTTP error while pulling IP ranges: {to_native(e)}")
- except SSLValidationError as e:
+ except ansible.module_utils.urls.SSLValidationError as e:
raise AnsibleLookupError(f"Error validating the server's certificate for: {to_native(e)}")
- except URLError as e:
+ except ansible.module_utils.six.moves.urllib.error.URLError as e:
raise AnsibleLookupError(f"Failed look up IP range service: {to_native(e)}")
- except ConnectionError as e:
+ except ansible.module_utils.urls.ConnectionError as e:
raise AnsibleLookupError(f"Error connecting to IP range service: {to_native(e)}")
if "region" in kwargs:
diff --git a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
index 06ad10be5..254182f30 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
@@ -182,9 +182,9 @@ class LookupModule(AWSLookupBase):
secrets = {}
for term in terms:
try:
- for object in _list_secrets(client, term):
- if "SecretList" in object:
- for secret_obj in object["SecretList"]:
+ for secret_wrapper in _list_secrets(client, term):
+ if "SecretList" in secret_wrapper:
+ for secret_obj in secret_wrapper["SecretList"]:
secrets.update(
{
secret_obj["Name"]: self.get_secret_value(
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
index ab3a9f073..4febe8743 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/acm.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -40,7 +40,7 @@ def acm_catch_boto_exception(func):
return func(*args, **kwargs)
except is_boto3_error_code(ignore_error_codes):
return None
- except (BotoCoreError, ClientError) as e:
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
if not module:
raise
module.fail_json_aws(e, msg=error)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
index 858e4e593..d5ad7ea83 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
@@ -202,7 +202,14 @@ def _aws_region(params):
return None
-def get_aws_region(module, boto3=None):
+def get_aws_region(module, boto3=None): # pylint: disable=redefined-outer-name
+ if boto3 is not None:
+ module.deprecate(
+ "get_aws_region(): the boto3 parameter will be removed in a release after 2025-05-01. "
+ "The parameter has been ignored since release 4.0.0.",
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
try:
return _aws_region(module.params)
except AnsibleBotocoreError as e:
@@ -266,7 +273,14 @@ def _aws_connection_info(params):
return region, endpoint_url, boto_params
-def get_aws_connection_info(module, boto3=None):
+def get_aws_connection_info(module, boto3=None): # pylint: disable=redefined-outer-name
+ if boto3 is not None:
+ module.deprecate(
+ "get_aws_connection_info(): the boto3 parameter will be removed in a release after 2025-05-01. "
+ "The parameter has been ignored since release 4.0.0.",
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
try:
return _aws_connection_info(module.params)
except AnsibleBotocoreError as e:
@@ -335,7 +349,7 @@ def is_boto3_error_code(code, e=None):
import sys
dummy, e, dummy = sys.exc_info()
- if not isinstance(code, list):
+ if not isinstance(code, (list, tuple, set)):
code = [code]
if isinstance(e, ClientError) and e.response["Error"]["Code"] in code:
return ClientError
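
With the isinstance check broadened, is_boto3_error_code() now accepts a list, tuple, or set of codes. A sketch of the assumed call pattern, mirroring how the helper is used elsewhere in this diff (the specific error codes are illustrative):

    from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code

    try:
        client.terminate_instances(InstanceIds=instance_ids)
    except is_boto3_error_code(("InvalidInstanceID.NotFound", "InvalidInstanceID.Malformed")):
        pass  # either code means there is nothing left to terminate
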
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/common.py b/ansible_collections/amazon/aws/plugins/module_utils/common.py
index 41ba80231..e802a8d80 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/common.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/common.py
@@ -4,7 +4,7 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
AMAZON_AWS_COLLECTION_NAME = "amazon.aws"
-AMAZON_AWS_COLLECTION_VERSION = "7.5.0"
+AMAZON_AWS_COLLECTION_VERSION = "8.0.0"
_collection_info_context = {
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
index afe8208f5..f3aa9f3f1 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -39,6 +39,7 @@ up in this module because "that's where the AWS code was" (originally).
import re
+import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils.ansible_release import __version__
# Used to live here, moved into ansible.module_utils.common.dict_transformations
@@ -72,7 +73,6 @@ from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=u
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy
from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
from .policy import compare_policies # pylint: disable=unused-import
-from .policy import sort_json_policy_dict # pylint: disable=unused-import
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries
from .retries import AWSRetry # pylint: disable=unused-import
@@ -99,12 +99,22 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id
a try block
"""
- def get_sg_name(sg, boto3=None):
+ def get_sg_name(sg):
return str(sg["GroupName"])
- def get_sg_id(sg, boto3=None):
+ def get_sg_id(sg):
return str(sg["GroupId"])
+ if boto3 is not None:
+ ansible_warnings.deprecate(
+ (
+                "The boto3 parameter for get_ec2_security_group_ids_from_names() has been deprecated. "
+ "The parameter has been ignored since release 4.0.0."
+ ),
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
+
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
@@ -124,7 +134,7 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id
else:
all_sec_groups = ec2_connection.describe_security_groups()["SecurityGroups"]
- unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
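
A hedged usage sketch of the updated helper (the client, group names, and VPC ID are hypothetical); the boto3 argument is now omitted entirely, since passing it only raises the deprecation warning added above:

    sg_ids = get_ec2_security_group_ids_from_names(
        ["default", "web-sg"],           # group names (or IDs) to resolve
        ec2_client,                      # a boto3 EC2 client
        vpc_id="vpc-0123456789abcdef0",  # optional: restrict the lookup to one VPC
    )
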
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
index 758eb9a33..3da2114c7 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -449,7 +449,7 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
if module.params.get("security_groups") is not None:
try:
self.security_groups = AWSRetry.jittered_backoff()(get_ec2_security_group_ids_from_names)(
- module.params.get("security_groups"), self.connection_ec2, boto3=True
+ module.params.get("security_groups"), self.connection_ec2
)
except ValueError as e:
self.module.fail_json(msg=str(e), exception=traceback.format_exc())
@@ -775,6 +775,9 @@ class ELBListeners:
dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None)
for listener_dict in listeners
]
+        # AlpnPolicy arrives as a str in the module input, but the API expects a list;
+        # wrap the single item in a one-element list
+ listeners = self._ensure_listeners_alpn_policy(listeners)
self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
self.current_listeners = self._get_elb_listeners()
self.purge_listeners = module.params.get("purge_listeners")
@@ -805,6 +808,16 @@ class ELBListeners:
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
+ @staticmethod
+ def _ensure_listeners_alpn_policy(listeners):
+ result = []
+ for l in listeners:
+ update_listener = deepcopy(l)
+ if "AlpnPolicy" in l:
+ update_listener["AlpnPolicy"] = [update_listener["AlpnPolicy"]]
+ result.append(update_listener)
+ return result
+
def _ensure_listeners_default_action_has_arn(self, listeners):
"""
If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and
@@ -863,7 +876,8 @@ class ELBListeners:
return listeners_to_add, listeners_to_modify, listeners_to_delete
- def _compare_listener(self, current_listener, new_listener):
+ @staticmethod
+ def _compare_listener(current_listener, new_listener):
"""
Compare two listeners.
@@ -882,43 +896,53 @@ class ELBListeners:
if current_listener["Protocol"] != new_listener["Protocol"]:
modified_listener["Protocol"] = new_listener["Protocol"]
- # If Protocol is HTTPS, check additional attributes
- if current_listener["Protocol"] == "HTTPS" and new_listener["Protocol"] == "HTTPS":
- # Cert
- if current_listener["SslPolicy"] != new_listener["SslPolicy"]:
- modified_listener["SslPolicy"] = new_listener["SslPolicy"]
- if (
- current_listener["Certificates"][0]["CertificateArn"]
- != new_listener["Certificates"][0]["CertificateArn"]
+ # If Protocol is HTTPS or TLS, check additional attributes
+ # SslPolicy
+ new_ssl_policy = new_listener.get("SslPolicy")
+ if new_ssl_policy and new_listener["Protocol"] in ("HTTPS", "TLS"):
+ current_ssl_policy = current_listener.get("SslPolicy")
+ if not current_ssl_policy or (current_ssl_policy and current_ssl_policy != new_ssl_policy):
+ modified_listener["SslPolicy"] = new_ssl_policy
+
+ # Certificates
+ new_certificates = new_listener.get("Certificates")
+ if new_certificates and new_listener["Protocol"] in ("HTTPS", "TLS"):
+ current_certificates = current_listener.get("Certificates")
+ if not current_certificates or (
+ current_certificates
+ and current_certificates[0]["CertificateArn"] != new_certificates[0]["CertificateArn"]
):
- modified_listener["Certificates"] = []
- modified_listener["Certificates"].append({})
- modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0][
- "CertificateArn"
- ]
- elif current_listener["Protocol"] != "HTTPS" and new_listener["Protocol"] == "HTTPS":
- modified_listener["SslPolicy"] = new_listener["SslPolicy"]
- modified_listener["Certificates"] = []
- modified_listener["Certificates"].append({})
- modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0]["CertificateArn"]
+ modified_listener["Certificates"] = [{"CertificateArn": new_certificates[0]["CertificateArn"]}]
# Default action
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
- if len(current_listener["DefaultActions"]) == len(new_listener["DefaultActions"]):
- current_actions_sorted = _sort_actions(current_listener["DefaultActions"])
- new_actions_sorted = _sort_actions(new_listener["DefaultActions"])
-
- new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
-
- if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [
- _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
- ]:
- modified_listener["DefaultActions"] = new_listener["DefaultActions"]
- # If the action lengths are different, then replace with the new actions
- else:
- modified_listener["DefaultActions"] = new_listener["DefaultActions"]
+ current_default_actions = current_listener.get("DefaultActions")
+ new_default_actions = new_listener.get("DefaultActions")
+ if new_default_actions:
+ if current_default_actions and len(current_default_actions) == len(new_default_actions):
+ current_actions_sorted = _sort_actions(current_default_actions)
+ new_actions_sorted = _sort_actions(new_default_actions)
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [
+ _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
+ ]:
+ modified_listener["DefaultActions"] = new_default_actions
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_listener["DefaultActions"] = new_default_actions
+
+ new_alpn_policy = new_listener.get("AlpnPolicy")
+ if new_alpn_policy:
+ if current_listener["Protocol"] == "TLS" and new_listener["Protocol"] == "TLS":
+ current_alpn_policy = current_listener.get("AlpnPolicy")
+ if not current_alpn_policy or current_alpn_policy[0] != new_alpn_policy[0]:
+ modified_listener["AlpnPolicy"] = new_alpn_policy
+ elif current_listener["Protocol"] != "TLS" and new_listener["Protocol"] == "TLS":
+ modified_listener["AlpnPolicy"] = new_alpn_policy
if modified_listener:
return modified_listener
@@ -946,7 +970,23 @@ class ELBListener:
# Rules is not a valid parameter for create_listener
if "Rules" in self.listener:
self.listener.pop("Rules")
- AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
+
+        # Handle multiple certs: attach only the first during listener creation, then call add_listener_certificates for the rest
+ listener_certificates = self.listener.get("Certificates", [])
+ first_certificate, other_certs = [], []
+ if len(listener_certificates) > 0:
+ first_certificate, other_certs = listener_certificates[0], listener_certificates[1:]
+ self.listener["Certificates"] = [first_certificate]
+ # create listener
+ create_listener_result = AWSRetry.jittered_backoff()(self.connection.create_listener)(
+ LoadBalancerArn=self.elb_arn, **self.listener
+ )
+ # only one cert can be specified per call to add_listener_certificates
+ for cert in other_certs:
+ AWSRetry.jittered_backoff()(self.connection.add_listener_certificates)(
+ ListenerArn=create_listener_result["Listeners"][0]["ListenerArn"], Certificates=[cert]
+ )
+
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
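
The ALPN normalization added above is easiest to see with concrete data; here is a self-contained replica of _ensure_listeners_alpn_policy (assumed equivalent, for illustration only):

    from copy import deepcopy

    def ensure_listeners_alpn_policy(listeners):
        # Wrap the scalar AlpnPolicy accepted from module input into the
        # one-element list the ELBv2 API expects.
        result = []
        for listener in listeners:
            updated = deepcopy(listener)
            if "AlpnPolicy" in listener:
                updated["AlpnPolicy"] = [updated["AlpnPolicy"]]
            result.append(updated)
        return result

    assert ensure_listeners_alpn_policy([{"Protocol": "TLS", "AlpnPolicy": "HTTP2Preferred"}]) == [
        {"Protocol": "TLS", "AlpnPolicy": ["HTTP2Preferred"]}
    ]
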
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
index 56920d53e..155a63152 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/iam.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -49,14 +49,14 @@ def detach_iam_group_policy(client, arn, group):
@IAMErrorHandler.deletion_error_handler("detach role policy")
@AWSRetry.jittered_backoff()
def detach_iam_role_policy(client, arn, role):
- client.detach_group_policy(PolicyArn=arn, RoleName=role)
+ client.detach_role_policy(PolicyArn=arn, RoleName=role)
return True
@IAMErrorHandler.deletion_error_handler("detach user policy")
@AWSRetry.jittered_backoff()
def detach_iam_user_policy(client, arn, user):
- client.detach_group_policy(PolicyArn=arn, UserName=user)
+ client.detach_user_policy(PolicyArn=arn, UserName=user)
return True
@@ -446,8 +446,6 @@ def normalize_iam_access_keys(access_keys: BotoResourceList) -> AnsibleAWSResour
def normalize_iam_instance_profile(profile: BotoResource) -> AnsibleAWSResource:
"""
Converts a boto3 format IAM instance profile into "Ansible" format
-
- _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
"""
transforms = {"Roles": _normalize_iam_roles}
transformed_profile = boto3_resource_to_ansible_dict(profile, nested_transforms=transforms)
@@ -458,10 +456,10 @@ def normalize_iam_role(role: BotoResource, _v7_compat: bool = False) -> AnsibleA
"""
Converts a boto3 format IAM instance role into "Ansible" format
- _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
+ _v7_compat is deprecated and will be removed in release after 2026-05-01 DO NOT USE.
"""
transforms = {"InstanceProfiles": _normalize_iam_instance_profiles}
- ignore_list = [] if _v7_compat else ["AssumeRolePolicyDocument"]
+ ignore_list = ["AssumeRolePolicyDocument"]
transformed_role = boto3_resource_to_ansible_dict(role, nested_transforms=transforms, ignore_list=ignore_list)
if _v7_compat and role.get("AssumeRolePolicyDocument"):
transformed_role["assume_role_policy_document_raw"] = role["AssumeRolePolicyDocument"]
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
index 8a2ff3c0b..82a81811d 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/modules.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -84,11 +84,11 @@ class AnsibleAWSModule:
def __init__(self, **kwargs):
local_settings = {}
- for key in AnsibleAWSModule.default_settings:
+ for key, default_value in AnsibleAWSModule.default_settings.items():
try:
local_settings[key] = kwargs.pop(key)
except KeyError:
- local_settings[key] = AnsibleAWSModule.default_settings[key]
+ local_settings[key] = default_value
self.settings = local_settings
if local_settings["default_args"]:
@@ -192,21 +192,21 @@ class AnsibleAWSModule:
return self._module.md5(*args, **kwargs)
def client(self, service, retry_decorator=None, **extra_params):
- region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self)
kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
kw_args.update(extra_params)
conn = boto3_conn(self, conn_type="client", resource=service, **kw_args)
return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator)
def resource(self, service, **extra_params):
- region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self)
kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
kw_args.update(extra_params)
return boto3_conn(self, conn_type="resource", resource=service, **kw_args)
@property
def region(self):
- return get_aws_region(self, True)
+ return get_aws_region(self)
def fail_json_aws(self, exception, msg=None, **kwargs):
"""call fail_json with processed exception
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
index 60b096f84..61b5edc1c 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/policy.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -30,7 +30,6 @@
from functools import cmp_to_key
-import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils._text import to_text
from ansible.module_utils.six import binary_type
from ansible.module_utils.six import string_types
@@ -151,59 +150,3 @@ def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
new_policy.setdefault("Version", default_version)
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
-
-
-def sort_json_policy_dict(policy_dict):
- """
- DEPRECATED - will be removed in amazon.aws 8.0.0
-
- Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
- different orders will return true
- Args:
- policy_dict (dict): Dict representing IAM JSON policy.
- Basic Usage:
- >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]}
- >>> sort_json_policy_dict(my_iam_policy)
- Returns:
- Dict: Will return a copy of the policy as a Dict but any List will be sorted
- {
- 'Principle': {
- 'AWS': [ '7', '14', '31', '101' ]
- }
- }
- """
-
- ansible_warnings.deprecate(
- (
- "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using "
- "amazon.aws.module_utils.policy.compare_policies instead"
- ),
- version="8.0.0",
- collection_name="amazon.aws",
- )
-
- def value_is_list(my_list):
- checked_list = []
- for item in my_list:
- if isinstance(item, dict):
- checked_list.append(sort_json_policy_dict(item))
- elif isinstance(item, list):
- checked_list.append(value_is_list(item))
- else:
- checked_list.append(item)
-
- # Sort list. If it's a list of dictionaries, sort by tuple of key-value
- # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
- checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
- return checked_list
-
- ordered_policy_dict = {}
- for key, value in policy_dict.items():
- if isinstance(value, dict):
- ordered_policy_dict[key] = sort_json_policy_dict(value)
- elif isinstance(value, list):
- ordered_policy_dict[key] = value_is_list(value)
- else:
- ordered_policy_dict[key] = value
-
- return ordered_policy_dict
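
sort_json_policy_dict() is removed on schedule for 8.0.0; the replacement its deprecation message recommended, compare_policies(), answers the underlying question (did the policy change?) without callers sorting anything. A hedged sketch of the assumed usage:

    from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

    current = {
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": ["s3:GetObject"], "Resource": "*"}],
    }
    desired = {
        "Version": "2012-10-17",
        "Statement": [{"Resource": "*", "Action": ["s3:GetObject"], "Effect": "Allow"}],
    }
    # compare_policies() returns True only when the policies actually differ,
    # so key ordering alone never triggers an update.
    assert compare_policies(current, desired) is False
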
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
index 85cde2e4e..20e0ae5e0 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/rds.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -5,6 +5,9 @@
from collections import namedtuple
from time import sleep
+from typing import Any
+from typing import Dict
+from typing import List
try:
from botocore.exceptions import BotoCoreError
@@ -16,6 +19,8 @@ except ImportError:
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from .botocore import is_boto3_error_code
+from .core import AnsibleAWSModule
from .retries import AWSRetry
from .tagging import ansible_dict_to_boto3_tag_list
from .tagging import boto3_tag_list_to_ansible_dict
@@ -440,3 +445,39 @@ def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove)
params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]}
_result, changed = call_method(client, module, method_name="add_role_to_db_instance", parameters=params)
return changed
+
+
+@AWSRetry.jittered_backoff()
+def describe_db_cluster_parameter_groups(
+ module: AnsibleAWSModule, connection: Any, group_name: str
+) -> List[Dict[str, Any]]:
+ result = []
+ try:
+ params = {}
+ if group_name is not None:
+ params["DBClusterParameterGroupName"] = group_name
+ paginator = connection.get_paginator("describe_db_cluster_parameter_groups")
+ result = paginator.paginate(**params).build_full_result()["DBClusterParameterGroups"]
+ except is_boto3_error_code("DBParameterGroupNotFound"):
+ pass
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access parameter groups information")
+ return result
+
+
+@AWSRetry.jittered_backoff()
+def describe_db_cluster_parameters(
+ module: AnsibleAWSModule, connection: Any, group_name: str, source: str = "all"
+) -> List[Dict[str, Any]]:
+ result = []
+ try:
+ paginator = connection.get_paginator("describe_db_cluster_parameters")
+ params = {"DBClusterParameterGroupName": group_name}
+ if source != "all":
+ params["Source"] = source
+ result = paginator.paginate(**params).build_full_result()["Parameters"]
+ except is_boto3_error_code("DBParameterGroupNotFound"):
+ pass
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access RDS cluster parameters information")
+ return result
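
These helpers back the new rds_cluster_param_group, rds_cluster_param_group_info, and rds_engine_versions_info modules listed in the diffstat above. A sketch of the assumed call pattern (the module object, client, and group name are hypothetical):

    groups = describe_db_cluster_parameter_groups(module, client, "aurora-custom-params")
    user_params = describe_db_cluster_parameters(module, client, "aurora-custom-params", source="user")
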
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
index 73297ffc7..961f36f22 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/s3.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -58,7 +58,7 @@ def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
- if "-" in etag:
+ if etag is not None and "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split("-")[1])
try:
@@ -73,7 +73,7 @@ def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None)
if not HAS_MD5:
return None
- if "-" in etag:
+ if etag is not None and "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split("-")[1])
try:
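
The new None guard matters because etag may legitimately be absent; when present, a multi-part S3 ETag embeds the part count after a dash, which is exactly what the surrounding code parses:

    etag = '"9b2cf535f27731c974343645a3985328-3"'  # quoted, as S3 returns it
    parts = int(etag[1:-1].split("-")[1])          # strip the quotes, read the part count
    assert parts == 3
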
diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
index fcd89b467..520bf9320 100644
--- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
@@ -668,25 +668,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
-ASG_ATTRIBUTES = (
- "AvailabilityZones",
- "DefaultCooldown",
- "DesiredCapacity",
- "HealthCheckGracePeriod",
- "HealthCheckType",
- "LaunchConfigurationName",
- "LoadBalancerNames",
- "MaxInstanceLifetime",
- "MaxSize",
- "MinSize",
- "AutoScalingGroupName",
- "PlacementGroup",
- "TerminationPolicies",
- "VPCZoneIdentifier",
-)
-
-INSTANCE_ATTRIBUTES = ("instance_id", "health_status", "lifecycle_state", "launch_config_name")
-
backoff_params = dict(retries=10, delay=3, backoff=1.5)
@@ -1109,7 +1090,7 @@ def wait_for_target_group(asg_connection, group_name):
def suspend_processes(ec2_connection, as_group):
- suspend_processes = set(module.params.get("suspend_processes"))
+ processes_to_suspend = set(module.params.get("suspend_processes"))
try:
suspended_processes = set([p["ProcessName"] for p in as_group["SuspendedProcesses"]])
@@ -1117,15 +1098,15 @@ def suspend_processes(ec2_connection, as_group):
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
- if suspend_processes == suspended_processes:
+ if processes_to_suspend == suspended_processes:
return False
- resume_processes = list(suspended_processes - suspend_processes)
+ resume_processes = list(suspended_processes - processes_to_suspend)
if resume_processes:
resume_asg_processes(ec2_connection, module.params.get("name"), resume_processes)
- if suspend_processes:
- suspend_asg_processes(ec2_connection, module.params.get("name"), list(suspend_processes))
+ if processes_to_suspend:
+ suspend_asg_processes(ec2_connection, module.params.get("name"), list(processes_to_suspend))
return True
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
index ae2e78068..49392fde0 100644
--- a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
@@ -57,6 +57,8 @@ options:
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template),
I(template_body) nor I(template_url) are specified, the previous template will be reused.
+      - The I(template) parameter has been deprecated and will be removed in a release after
+ 2026-05-01. It is recommended to use I(template_body) with the lookup plugin.
type: path
notification_arns:
description:
@@ -172,7 +174,9 @@ EXAMPLES = r"""
state: "present"
region: "us-east-1"
disable_rollback: true
- template: "files/cloudformation-example.json"
+ # The template parameter has been deprecated, use template_body with lookup instead.
+ # template: "files/cloudformation-example.json"
+ template_body: "{{ lookup('file', 'cloudformation-example.json') }}"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
@@ -188,7 +192,9 @@ EXAMPLES = r"""
state: "present"
region: "us-east-1"
disable_rollback: true
- template: "roles/cloudformation/files/cloudformation-example.json"
+ # The template parameter has been deprecated, use template_body with lookup instead.
+ # template: "roles/cloudformation/files/cloudformation-example.json"
+ template_body: "{{ lookup('file', 'cloudformation-example.json') }}"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
@@ -339,9 +345,17 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
-# Set a default, mostly for our integration tests. This will be overridden in
-# the main() loop to match the parameters we're passed
-retry_decorator = AWSRetry.jittered_backoff()
+
+@AWSRetry.jittered_backoff()
+def _search_events(cfn, stack_name, events_limit, token_filter):
+ pg = cfn.get_paginator("describe_stack_events").paginate(
+ StackName=stack_name,
+ PaginationConfig={"MaxItems": events_limit},
+ )
+ if token_filter is None:
+ return list(pg.search("StackEvents[*]"))
+
+ return list(pg.search(f"StackEvents[?ClientRequestToken == '{token_filter}']"))
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
@@ -349,13 +363,7 @@ def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
ret = {"events": [], "log": []}
try:
- pg = cfn.get_paginator("describe_stack_events").paginate(
- StackName=stack_name, PaginationConfig={"MaxItems": events_limit}
- )
- if token_filter is not None:
- events = list(retry_decorator(pg.search)(f"StackEvents[?ClientRequestToken == '{token_filter}']"))
- else:
- events = list(pg.search("StackEvents[*]"))
+ events = _search_events(cfn, stack_name, events_limit, token_filter)
except is_boto3_error_message("does not exist"):
ret["log"].append("Stack does not exist.")
return ret
@@ -640,7 +648,13 @@ def main():
stack_name=dict(required=True),
template_parameters=dict(required=False, type="dict", default={}),
state=dict(default="present", choices=["present", "absent"]),
- template=dict(default=None, required=False, type="path"),
+ template=dict(
+ default=None,
+ required=False,
+ type="path",
+ removed_at_date="2026-05-01",
+ removed_from_collection="amazon.aws",
+ ),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
stack_policy_body=dict(default=None, required=False, type="json"),
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
index 597d43f1b..6d9017f67 100644
--- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
@@ -334,19 +334,6 @@ def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True):
return True
-def get_tag_list(keys, tags):
- """
- Returns a list of dicts with tags to act on
- keys : set of keys to get the values for
- tags : the dict of tags to turn into a list
- """
- tag_list = []
- for k in keys:
- tag_list.append({"Key": k, "Value": tags[k]})
-
- return tag_list
-
-
def set_logging(module, client, name, action):
"""
Starts or stops logging based on given state
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
index 00ead5ce5..ec6663146 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
@@ -339,6 +339,11 @@ description:
returned: when AMI is created or already exists
type: str
sample: "nat-server"
+enhanced_networking:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: when AMI is created or already exists
+ type: bool
+ sample: true
hypervisor:
description: Type of hypervisor.
returned: when AMI is created or already exists
@@ -349,11 +354,26 @@ image_id:
returned: when AMI is created or already exists
type: str
sample: "ami-1234abcd"
+image_owner_alias:
+  description: The owner alias (amazon | aws-marketplace).
+ returned: when AMI is created or already exists
+ type: str
+ sample: "amazon"
+image_type:
+ description: Type of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "machine"
is_public:
description: Whether image is public.
returned: when AMI is created or already exists
type: bool
sample: false
+kernel_id:
+ description: The kernel associated with the image, if any. Only applicable for machine images.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "aki-88aa75e1"
launch_permission:
description: Permissions allowing other accounts to access the AMI.
returned: when AMI is created or already exists
@@ -379,6 +399,16 @@ platform:
description: Platform of image.
returned: when AMI is created or already exists
type: str
+ sample: "Windows"
+product_codes:
+ description: Any product codes associated with the AMI.
+ returned: when AMI is created or already exists
+ type: list
+ sample: []
+ramdisk_id:
+ description: The RAM disk associated with the image, if any. Only applicable for machine images.
+ returned: when AMI is created or already exists
+ type: str
sample: null
root_device_name:
description: Root device name of image.
@@ -390,11 +420,24 @@ root_device_type:
returned: when AMI is created or already exists
type: str
sample: "ebs"
+sriov_net_support:
+ description: Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "simple"
state:
description: State of image.
returned: when AMI is created or already exists
type: str
sample: "available"
+state_reason:
+ description: The reason for the state change.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ 'Code': 'string',
+ 'Message': 'string'
+ }
tags:
description: A dictionary of tags assigned to image.
returned: when AMI is created or already exists
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
index 2929a0292..906c141e1 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
@@ -112,7 +112,6 @@ images:
sample: '2017-10-16T19:22:13.000Z'
description:
description: The description of the AMI.
- returned: always
type: str
sample: ''
ena_support:
@@ -163,6 +162,11 @@ images:
returned: always
type: str
sample: '123456789012'
+ platform_details:
+ description: Platform of image.
+ returned: always
+ type: str
+ sample: "Windows"
public:
description: Whether the image has public launch permissions.
returned: always
@@ -180,7 +184,6 @@ images:
sample: ebs
sriov_net_support:
description: Whether enhanced networking is enabled.
- returned: always
type: str
sample: simple
state:
@@ -192,6 +195,11 @@ images:
description: Any tags assigned to the image.
returned: always
type: dict
+ usage_operation:
+ description: The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.
+ returned: always
+ type: str
+ sample: "RunInstances"
virtualization_type:
description: The type of virtualization of the AMI.
returned: always
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
index c00dc515c..8e775582b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
@@ -79,19 +79,58 @@ addresses:
description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
returned: on success
type: list
- sample: [{
- "allocation_id": "eipalloc-64de1b01",
- "association_id": "eipassoc-0fe9ce90d6e983e97",
- "domain": "vpc",
- "instance_id": "i-01020cfeb25b0c84f",
- "network_interface_id": "eni-02fdeadfd4beef9323b",
- "network_interface_owner_id": "0123456789",
- "private_ip_address": "10.0.0.1",
- "public_ip": "54.81.104.1",
- "tags": {
+ elements: dict
+ contains:
+ "allocation_id":
+ description: The ID representing the allocation of the address.
+ returned: always
+ type: str
+ sample: "eipalloc-64de1b01"
+ "association_id":
+ description: The ID representing the association of the address with an instance.
+ type: str
+ sample: "eipassoc-0fe9ce90d6e983e97"
+ "domain":
+      description: The network (vpc).
+ type: str
+ returned: always
+ sample: "vpc"
+ "instance_id":
+ description: The ID of the instance that the address is associated with (if any).
+ returned: if any instance is associated
+ type: str
+ sample: "i-01020cfeb25b0c84f"
+ "network_border_group":
+ description: The name of the unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses.
+ returned: if any instance is associated
+ type: str
+ sample: "us-east-1"
+ "network_interface_id":
+ description: The ID of the network interface.
+ returned: if any instance is associated
+ type: str
+ sample: "eni-02fdeadfd4beef9323b"
+ "network_interface_owner_id":
+        description: The ID of the Amazon Web Services account that owns the network interface.
+ returned: if any instance is associated
+ type: str
+ sample: "0123456789"
+ "private_ip_address":
+ description: The private IP address associated with the Elastic IP address.
+ returned: always
+ type: str
+ sample: "10.0.0.1"
+ "public_ip":
+ description: The Elastic IP address.
+ returned: if any instance is associated
+ type: str
+ sample: "54.81.104.1"
+ "tags":
+ description: Any tags assigned to the Elastic IP address.
+ type: dict
+ sample: {
"Name": "test-vm-54.81.104.1"
}
- }]
"""
try:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
index bf8e76a2b..794ed45a9 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
@@ -217,15 +217,25 @@ interface:
returned: when state != absent
type: complex
contains:
+ attachment:
+ description: The network interface attachment.
+ type: dict
+ sample: {
+ "attach_time": "2024-04-25T20:57:20+00:00",
+ "attachment_id": "eni-attach-0ddce58b341a1846f",
+ "delete_on_termination": true,
+ "device_index": 0,
+ "instance_id": "i-032cb1cceb29250d2",
+ "status": "attached"
+ }
description:
description: interface description
type: str
sample: Firewall network interface
groups:
- description: list of security groups
- type: list
- elements: dict
- sample: [ { "sg-f8a8a9da": "default" } ]
+ description: dict of security groups
+ type: dict
+ sample: { "sg-f8a8a9da": "default" }
id:
description: network interface id
type: str
@@ -368,10 +378,7 @@ def correct_ip_count(connection, ip_count, module, eni_id):
for ip in eni["PrivateIpAddresses"]:
private_addresses.add(ip["PrivateIpAddress"])
- if len(private_addresses) == ip_count:
- return True
- else:
- return False
+ return bool(len(private_addresses) == ip_count)
def wait_for(function_pointer, *args):
@@ -395,7 +402,7 @@ def create_eni(connection, vpc_id, module):
private_ip_address = module.params.get("private_ip_address")
description = module.params.get("description")
security_groups = get_ec2_security_group_ids_from_names(
- module.params.get("security_groups"), connection, vpc_id=vpc_id, boto3=True
+ module.params.get("security_groups"), connection, vpc_id=vpc_id
)
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
@@ -510,7 +517,7 @@ def modify_eni(connection, module, eni):
)
changed = True
if len(security_groups) > 0:
- groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True)
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"])
if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups):
if not module.check_mode:
connection.modify_network_interface_attribute(
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
index 5ef36b258..ca0a4bb22 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
@@ -73,6 +73,7 @@ network_interfaces:
device_index: 1,
instance_id: "i-15b8d3cadbafa1234",
instance_owner_id: "123456789012",
+ "network_card_index": 0,
status: "attached"
}
availability_zone:
@@ -147,7 +148,6 @@ network_interfaces:
sample: []
requester_id:
description: The ID of the entity that launched the ENI.
- returned: always
type: str
sample: "AIDA12345EXAMPLE54321"
requester_managed:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
index 06089e4fe..c09cce97b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
@@ -359,10 +359,12 @@ options:
type: int
required: false
tenancy:
- description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ description:
+ - Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ - Support for I(tenancy=host) was added in amazon.aws 7.6.0.
type: str
required: false
- choices: ['dedicated', 'default']
+ choices: ['dedicated', 'default', 'host']
license_specifications:
description:
- The license specifications to be used for the instance.
@@ -671,16 +673,67 @@ instances:
returned: always
type: str
sample: vol-12345678
+ capacity_reservation_specification:
+ description: Information about the Capacity Reservation targeting option.
+ type: complex
+ contains:
+ capacity_reservation_preference:
+ description: Describes the Capacity Reservation preferences.
+ type: str
+ sample: open
client_token:
description: The idempotency token you provided when you launched the instance, if applicable.
returned: always
type: str
sample: mytoken
+ cpu_options:
+ description: The CPU options for the instance.
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ type: int
+ sample: 1
+ threads_per_core:
+ description: The number of threads per CPU core.
+ type: int
+ sample: 2
+ amd_sev_snp:
+ description: Indicates whether the instance is enabled for AMD SEV-SNP.
+ type: str
+ sample: enabled
+ current_instance_boot_mode:
+ description: The boot mode that is used to boot the instance at launch or start.
+ type: str
+ sample: legacy-bios
ebs_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
+ ena_support:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ enclave_options:
+ description: Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ type: dict
+ contains:
+ enabled:
+ description: If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves.
+ returned: always
+ type: bool
+ sample: false
+ hibernation_options:
+ description: Indicates whether the instance is enabled for hibernation.
+ type: dict
+ contains:
+ configured:
+ description: If true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
+ returned: always
+ type: bool
+ sample: false
hypervisor:
description: The hypervisor type of the instance.
returned: always
@@ -737,6 +790,35 @@ instances:
returned: always
type: str
sample: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789
+ metadata_options:
+ description: The metadata options for the instance.
+ returned: always
+ type: complex
+ contains:
+ http_endpoint:
+ description: Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.
+ type: str
+ sample: enabled
+ http_protocol_ipv6:
+ description: Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.
+ type: str
+ sample: disabled
+ http_put_response_hop_limit:
+ description: The maximum number of hops that the metadata token can travel.
+ type: int
+ sample: 1
+ http_tokens:
+ description: Indicates whether IMDSv2 is required.
+ type: str
+ sample: optional
+ instance_metadata_tags:
+ description: Indicates whether access to instance tags from the instance metadata is enabled or disabled.
+ type: str
+ sample: disabled
+ state:
+ description: The state of the metadata option changes.
+ type: str
+ sample: applied
monitoring:
description: The monitoring for the instance.
returned: always
@@ -750,7 +832,8 @@ instances:
network_interfaces:
description: One or more network interfaces for the instance.
returned: always
- type: complex
+ type: list
+ elements: dict
contains:
association:
description: The association information for an Elastic IPv4 associated with the network interface.
@@ -797,6 +880,11 @@ instances:
returned: always
type: int
sample: 0
+ network_card_index:
+ description: The index of the network card.
+ returned: always
+ type: int
+ sample: 0
status:
description: The attachment state.
returned: always
@@ -823,6 +911,11 @@ instances:
returned: always
type: str
sample: mygroup
+ interface_type:
+ description: The type of network interface.
+ returned: always
+ type: str
+ sample: interface
ipv6_addresses:
description: One or more IPv6 addresses associated with the network interface.
returned: always
@@ -849,6 +942,11 @@ instances:
returned: always
type: str
sample: 01234567890
+ private_dns_name:
+        description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -862,7 +960,6 @@ instances:
contains:
association:
description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
type: complex
contains:
ip_owner_id:
@@ -885,6 +982,11 @@ instances:
returned: always
type: bool
sample: true
+ private_dns_name:
+        description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The private IPv4 address of the network interface.
returned: always
@@ -926,7 +1028,6 @@ instances:
type: str
group_id:
description: The ID of the placement group the instance is in (for cluster compute instances).
- returned: always
type: str
sample: "pg-01234566"
group_name:
@@ -936,16 +1037,13 @@ instances:
sample: "my-placement-group"
host_id:
description: The ID of the Dedicated Host on which the instance resides.
- returned: always
type: str
host_resource_group_arn:
description: The ARN of the host resource group in which the instance is in.
- returned: always
type: str
sample: "arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup"
partition_number:
description: The number of the partition the instance is in.
- returned: always
type: int
sample: 1
tenancy:
@@ -959,11 +1057,32 @@ instances:
type: str
version_added: 7.1.0
sample:
+ platform_details:
+ description: The platform details value for the instance.
+ returned: always
+ type: str
+ sample: Linux/UNIX
private_dns_name:
description: The private DNS name.
returned: always
type: str
sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_dns_name_options:
+ description: The options for the instance hostname.
+ type: dict
+ contains:
+ enable_resource_name_dns_a_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS A records.
+ type: bool
+ sample: false
+ enable_resource_name_dns_aaaa_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
+ type: bool
+ sample: false
+ hostname_type:
+ description: The type of hostname to assign to an instance.
+ type: str
+ sample: ip-name
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -1021,7 +1140,7 @@ instances:
returned: always
type: str
sample: my-security-group
- network.source_dest_check:
+ source_dest_check:
description: Indicates whether source/destination checking is enabled.
returned: always
type: bool
@@ -1458,7 +1577,7 @@ def build_top_level_options(params):
return spec
-def build_instance_tags(params, propagate_tags_to_volumes=True):
+def build_instance_tags(params):
tags = params.get("tags") or {}
if params.get("name") is not None:
tags["Name"] = params.get("name")
@@ -1930,7 +2049,7 @@ def change_instance_state(filters, desired_module_state):
if inst["State"]["Name"] in ("pending", "running"):
unchanged.add(inst["InstanceId"])
continue
- elif inst["State"]["Name"] == "stopping":
+ if inst["State"]["Name"] == "stopping":
await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True)
if module.check_mode:
@@ -2029,63 +2148,60 @@ def handle_existing(existing_matches, state, filters):
return result
-def enforce_count(existing_matches, module, desired_module_state):
+def enforce_count(existing_matches, desired_module_state):
exact_count = module.params.get("exact_count")
- try:
- current_count = len(existing_matches)
- if current_count == exact_count:
- module.exit_json(
- changed=False,
- instances=[pretty_instance(i) for i in existing_matches],
- instance_ids=[i["InstanceId"] for i in existing_matches],
- msg=f"{exact_count} instances already running, nothing to do.",
- )
+ current_count = len(existing_matches)
+ if current_count == exact_count:
+ return dict(
+ changed=False,
+ instances=[pretty_instance(i) for i in existing_matches],
+ instance_ids=[i["InstanceId"] for i in existing_matches],
+ msg=f"{exact_count} instances already running, nothing to do.",
+ )
- elif current_count < exact_count:
- # launch instances
- try:
- ensure_present(
- existing_matches=existing_matches,
- desired_module_state=desired_module_state,
- current_count=current_count,
- )
- except botocore.exceptions.ClientError as e:
- module.fail_json(e, msg="Unable to launch instances")
- elif current_count > exact_count:
- to_terminate = current_count - exact_count
- # sort the instances from least recent to most recent based on launch time
- existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
- # get the instance ids of instances with the count tag on them
- all_instance_ids = [x["InstanceId"] for x in existing_matches]
- terminate_ids = all_instance_ids[0:to_terminate]
- if module.check_mode:
- module.exit_json(
- changed=True,
- terminated_ids=terminate_ids,
- instance_ids=all_instance_ids,
- msg=f"Would have terminated following instances if not in check mode {terminate_ids}",
- )
- # terminate instances
- try:
- client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
- await_instances(terminate_ids, desired_module_state="terminated", force_wait=True)
- except is_boto3_error_code("InvalidInstanceID.NotFound"):
- pass
- except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
- module.fail_json(e, msg="Unable to terminate instances")
- # include data for all matched instances in addition to the list of terminations
- # allowing for recovery of metadata from the destructive operation
- module.exit_json(
- changed=True,
- msg="Successfully terminated instances.",
- terminated_ids=terminate_ids,
- instance_ids=all_instance_ids,
- instances=existing_matches,
- )
+ if current_count < exact_count:
+ # launch instances
+ return ensure_present(
+ existing_matches=existing_matches,
+ desired_module_state=desired_module_state,
+ current_count=current_count,
+ )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to enforce instance count")
+ to_terminate = current_count - exact_count
+ # sort the instances from least recent to most recent based on launch time
+ existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
+ # get the instance ids of instances with the count tag on them
+ all_instance_ids = [x["InstanceId"] for x in existing_matches]
+ terminate_ids = all_instance_ids[0:to_terminate]
+ if module.check_mode:
+ return dict(
+ changed=True,
+ terminated_ids=terminate_ids,
+ instance_ids=all_instance_ids,
+            msg=f"Would have terminated the following instances if not in check mode: {terminate_ids}",
+ )
+ # terminate instances
+ try:
+ client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
+ await_instances(terminate_ids, desired_module_state="terminated", force_wait=True)
+ except is_boto3_error_code("InvalidInstanceID.NotFound"):
+ pass
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json(e, msg="Unable to terminate instances")
+
+ # include data for all matched instances in addition to the list of terminations
+ # allowing for recovery of metadata from the destructive operation
+ return dict(
+ changed=True,
+ msg="Successfully terminated instances.",
+ terminated_ids=terminate_ids,
+ instance_ids=all_instance_ids,
+ instances=existing_matches,
+ )
def ensure_present(existing_matches, desired_module_state, current_count=None):
@@ -2100,7 +2216,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
if module.check_mode:
if existing_matches:
instance_ids = [x["InstanceId"] for x in existing_matches]
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
instances=existing_matches,
@@ -2108,7 +2224,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
msg="Would have launched instances if not in check_mode.",
)
else:
- module.exit_json(
+ return dict(
changed=True,
spec=instance_spec,
msg="Would have launched instances if not in check_mode.",
@@ -2144,14 +2260,14 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
all_instance_ids = [x["InstanceId"] for x in existing_matches] + instance_ids
if not module.params.get("wait"):
if existing_matches:
- module.exit_json(
+ return dict(
changed=True,
changed_ids=instance_ids,
instance_ids=all_instance_ids,
spec=instance_spec,
)
else:
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
spec=instance_spec,
@@ -2161,7 +2277,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
if existing_matches:
all_instances = existing_matches + instances
- module.exit_json(
+ return dict(
changed=True,
changed_ids=instance_ids,
instance_ids=all_instance_ids,
@@ -2169,7 +2285,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
spec=instance_spec,
)
else:
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
instances=[pretty_instance(i) for i in instances],
@@ -2307,7 +2423,7 @@ def main():
host_id=dict(type="str"),
host_resource_group_arn=dict(type="str"),
partition_number=dict(type="int"),
- tenancy=dict(type="str", choices=["dedicated", "default"]),
+ tenancy=dict(type="str", choices=["dedicated", "default", "host"]),
),
),
instance_initiated_shutdown_behavior=dict(type="str", choices=["stop", "terminate"]),
@@ -2396,7 +2512,7 @@ def main():
changed=False,
)
elif module.params.get("exact_count"):
- enforce_count(existing_matches, module, desired_module_state=state)
+ result = enforce_count(existing_matches, desired_module_state=state)
elif existing_matches and not module.params.get("count"):
for match in existing_matches:
warn_if_public_ip_assignment_changed(match)
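
For illustration, a minimal playbook sketch exercising the reworked exact_count flow above; the AMI ID and Name tag are placeholders, and the registered result carries the instances/instance_ids keys documented earlier in this file:

    - name: Keep exactly three matching instances running
      amazon.aws.ec2_instance:
        name: webserver                        # placeholder Name tag used for matching
        image_id: ami-0123456789abcdef0        # placeholder AMI ID
        instance_type: t3.micro
        exact_count: 3
        state: running
      register: result

    - name: Inspect the newly documented private_dns_name_options return value
      ansible.builtin.debug:
        var: result.instances[0].private_dns_name_options
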
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
index 1caea9365..af12729eb 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
@@ -161,6 +161,14 @@ instances:
returned: always
type: str
sample: vol-12345678
+ capacity_reservation_specification:
+ description: Information about the Capacity Reservation targeting option.
+ type: complex
+ contains:
+ capacity_reservation_preference:
+ description: Describes the Capacity Reservation preferences.
+ type: str
+ sample: open
cpu_options:
description: The CPU options set for the instance.
returned: always
@@ -181,11 +189,38 @@ instances:
returned: always
type: str
sample: mytoken
+ current_instance_boot_mode:
+ description: The boot mode that is used to boot the instance at launch or start.
+ type: str
+ sample: legacy-bios
ebs_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
+ ena_support:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ enclave_options:
+ description: Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ type: dict
+ contains:
+ enabled:
+ description: If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves.
+ returned: always
+ type: bool
+ sample: false
+ hibernation_options:
+ description: Indicates whether the instance is enabled for hibernation.
+ type: dict
+ contains:
+ configured:
+ description: If true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
+ returned: always
+ type: bool
+ sample: false
hypervisor:
description: The hypervisor type of the instance.
returned: always
@@ -193,7 +228,6 @@ instances:
sample: xen
iam_instance_profile:
description: The IAM instance profile associated with the instance, if applicable.
- returned: always
type: complex
contains:
arn:
@@ -231,6 +265,44 @@ instances:
returned: always
type: str
sample: "2017-03-23T22:51:24+00:00"
+ maintenance_options:
+ description: Provides information on the recovery and maintenance options of your instance.
+ returned: always
+ type: dict
+ contains:
+ auto_recovery:
+ description: Provides information on the current automatic recovery behavior of your instance.
+ type: str
+ sample: default
+ metadata_options:
+ description: The metadata options for the instance.
+ returned: always
+ type: complex
+ contains:
+ http_endpoint:
+ description: Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.
+ type: str
+ sample: enabled
+ http_protocol_ipv6:
+ description: Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.
+ type: str
+ sample: disabled
+ http_put_response_hop_limit:
+ description: The maximum number of hops that the metadata token can travel.
+ type: int
+ sample: 1
+ http_tokens:
+ description: Indicates whether IMDSv2 is required.
+ type: str
+ sample: optional
+ instance_metadata_tags:
+ description: Indicates whether access to instance tags from the instance metadata is enabled or disabled.
+ type: str
+ sample: disabled
+ state:
+ description: The state of the metadata option changes.
+ type: str
+ sample: applied
monitoring:
description: The monitoring for the instance.
returned: always
@@ -291,6 +363,11 @@ instances:
returned: always
type: int
sample: 0
+ network_card_index:
+ description: The index of the network card.
+ returned: always
+ type: int
+ sample: 0
status:
description: The attachment state.
returned: always
@@ -317,6 +394,11 @@ instances:
returned: always
type: str
sample: mygroup
+ interface_type:
+ description: The type of network interface.
+ returned: always
+ type: str
+ sample: interface
ipv6_addresses:
description: One or more IPv6 addresses associated with the network interface.
returned: always
@@ -343,6 +425,11 @@ instances:
returned: always
type: str
sample: 01234567890
+ private_dns_name:
+        description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -356,7 +443,6 @@ instances:
contains:
association:
description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
type: complex
contains:
ip_owner_id:
@@ -379,6 +465,11 @@ instances:
returned: always
type: bool
sample: true
+ private_dns_name:
+        description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The private IPv4 address of the network interface.
returned: always
@@ -424,11 +515,32 @@ instances:
returned: always
type: str
sample: default
+ platform_details:
+ description: The platform details value for the instance.
+ returned: always
+ type: str
+ sample: Linux/UNIX
private_dns_name:
description: The private DNS name.
returned: always
type: str
sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_dns_name_options:
+ description: The options for the instance hostname.
+ type: dict
+ contains:
+ enable_resource_name_dns_a_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS A records.
+ type: bool
+ sample: false
+ enable_resource_name_dns_aaaa_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
+ type: bool
+ sample: false
+ hostname_type:
+ description: The type of hostname to assign to an instance.
+ type: str
+ sample: ip-name
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
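
As a hedged sketch of consuming the return values newly documented above (metadata_options in particular); the tag filter is a placeholder:

    - name: Gather instance details
      amazon.aws.ec2_instance_info:
        filters:
          "tag:Name": webserver                # placeholder tag filter
      register: info

    - name: Verify IMDSv2 is required on every matched instance
      ansible.builtin.assert:
        that:
          - item.metadata_options.http_tokens == 'required'
        quiet: true
      loop: "{{ info.instances }}"
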
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
index 26ecaad0a..83fdd4417 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
@@ -450,6 +450,8 @@ socket.setdefaulttimeout(5)
# The ec2_metadata_facts module is a special case, while we generally dropped support for Python < 3.6
# this module doesn't depend on the SDK and still has valid use cases for folks working with older
# OSes.
+
+# pylint: disable=consider-using-f-string
try:
json_decode_error = json.JSONDecodeError
except AttributeError:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
index 9d16f339f..44afa7bff 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
@@ -413,8 +413,8 @@ EXAMPLES = r"""
"""
RETURN = r"""
-group_name:
- description: Security group name
+description:
+ description: Description of security group
sample: My Security Group
type: str
returned: on create/update
@@ -423,11 +423,132 @@ group_id:
sample: sg-abcd1234
type: str
returned: on create/update
-description:
- description: Description of security group
+group_name:
+ description: Security group name
sample: My Security Group
type: str
returned: on create/update
+ip_permissions:
+ description: The inbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: If the protocol is TCP or UDP, this is the start of the port range.
+ type: int
+ sample: 80
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+    to_port:
+ description: If the protocol is TCP or UDP, this is the end of the port range.
+ type: int
+ sample: 80
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+ip_permissions_egress:
+ description: The outbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
tags:
description: Tags associated with the security group
sample:
@@ -440,35 +561,6 @@ vpc_id:
sample: vpc-abcd1234
type: str
returned: on create/update
-ip_permissions:
- description: Inbound rules associated with the security group.
- sample:
- - from_port: 8182
- ip_protocol: tcp
- ip_ranges:
- - cidr_ip: "198.51.100.1/32"
- ipv6_ranges: []
- prefix_list_ids: []
- to_port: 8182
- user_id_group_pairs: []
- type: list
- returned: on create/update
-ip_permissions_egress:
- description: Outbound rules associated with the security group.
- sample:
- - ip_protocol: -1
- ip_ranges:
- - cidr_ip: "0.0.0.0/0"
- ipv6_ranges: []
- prefix_list_ids: []
- user_id_group_pairs: []
- type: list
- returned: on create/update
-owner_id:
- description: AWS Account ID of the security group
- sample: 123456789012
- type: int
- returned: on create/update
"""
import itertools
@@ -532,7 +624,7 @@ def rule_cmp(a, b):
# equal protocols can interchange `(-1, -1)` and `(None, None)`
if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
continue
- elif getattr(a, prop) != getattr(b, prop):
+ if getattr(a, prop) != getattr(b, prop):
return False
elif getattr(a, prop) != getattr(b, prop):
return False
@@ -1296,8 +1388,7 @@ def flatten_nested_targets(module, rules):
date="2024-12-01",
collection_name="amazon.aws",
)
- for t in _flatten(target):
- yield t
+ yield from _flatten(target)
elif isinstance(target, string_types):
yield target
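
A minimal sketch showing the ip_permissions structure documented above in use; the group name, VPC ID and CIDR are placeholders:

    - name: Create a security group and examine its inbound rules
      amazon.aws.ec2_security_group:
        name: example-sg                       # placeholder group name
        description: Example group
        vpc_id: vpc-0123456789abcdef0          # placeholder VPC ID
        rules:
          - proto: tcp
            ports: [80]
            cidr_ip: 198.51.100.0/24           # placeholder CIDR
      register: sg

    - name: Show the port range of the first inbound rule
      ansible.builtin.debug:
        msg: "{{ sg.ip_permissions[0].from_port }}-{{ sg.ip_permissions[0].to_port }}"
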
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
index 8b7a04ba1..fe1002f2c 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
@@ -107,6 +107,10 @@ security_groups:
type: list
elements: dict
contains:
+ from_port:
+ description: If the protocol is TCP or UDP, this is the start of the port range.
+ type: int
+ sample: 80
ip_protocol:
description: The IP protocol name or number.
returned: always
@@ -141,6 +145,10 @@ security_groups:
description: The ID of the prefix.
returned: always
type: str
+      to_port:
+ description: If the protocol is TCP or UDP, this is the end of the port range.
+ type: int
+ sample: 80
user_id_group_pairs:
description: The security group and AWS account ID pairs.
returned: always
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
index 6fa2ca47b..de63d3703 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
@@ -329,22 +329,6 @@ def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
return vol
-def get_volumes(module, ec2_conn):
- instance = module.params.get("instance")
-
- find_params = dict()
- if instance:
- find_params["Filters"] = ansible_dict_to_boto3_filter_list({"attachment.instance-id": instance})
-
- vols = []
- try:
- vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params)
- vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get("Volumes", [])]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error while getting EBS volumes")
- return vols
-
-
def delete_volume(module, ec2_conn, volume_id=None):
changed = False
if volume_id:
@@ -858,7 +842,7 @@ def main():
elif state == "absent":
if not name and not param_id:
module.fail_json("A volume name or id is required for deletion")
- if volume:
+ if volume and volume.get("state") not in ("deleting", "deleted"):
if module.check_mode:
module.exit_json(changed=True, msg="Would have deleted volume if not in check mode.")
detach_volume(module, ec2_conn, volume_dict=volume)
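
A brief sketch of the deletion path guarded above: with the added state check, re-running the task while the volume is still in the deleting state no longer triggers a second delete attempt. The volume Name tag is a placeholder:

    - name: Remove a volume by Name tag (safe to re-run mid-deletion)
      amazon.aws.ec2_vol:
        name: example-data-volume              # placeholder volume Name tag
        state: absent
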
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
index 34f12e789..1d41b89ea 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
@@ -843,7 +843,8 @@ def ensure_route_table_present(connection, module):
if changed:
# pause to allow route table routes/subnets/associations to be updated before exiting with final state
sleep(5)
- module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+ return dict(changed=changed, route_table=get_route_table_info(connection, module, route_table))
def main():
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
index ac3bb3642..25ebd8c84 100644
--- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
@@ -236,7 +236,7 @@ EXAMPLES = r"""
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
+ Certificates: # The ARN of the certificate
- CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required.
@@ -260,7 +260,7 @@ EXAMPLES = r"""
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
+ Certificates: # The ARN of the certificate
- CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required.
@@ -330,6 +330,29 @@ EXAMPLES = r"""
Type: forward
state: present
+# Create an ALB with a listener having multiple listener certificates
+- amazon.aws.elb_application_lb:
+ name: myalb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+      Certificates: # The ARNs of the certificates (the first one in the list becomes the default certificate)
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/secondtest.domain.com
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/thirdtest.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
# Remove an ALB
- amazon.aws.elb_application_lb:
name: myalb
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
index 4008b8029..60134f0e3 100644
--- a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
@@ -1412,7 +1412,7 @@ class ElbManager:
if not self.health_check:
return False
- """Set health check values on ELB as needed"""
+ # Set health check values on ELB as needed
health_check_config = self._format_healthcheck()
if self.elb and health_check_config == self.elb["HealthCheck"]:
@@ -1490,14 +1490,6 @@ class ElbManager:
def _policy_name(self, policy_type):
return f"ec2-elb-lb-{policy_type}"
- def _get_listener_policies(self):
- """Get a list of listener policies mapped to the LoadBalancerPort"""
- if not self.elb:
- return {}
- listener_descriptions = self.elb.get("ListenerDescriptions", [])
- policies = {l["LoadBalancerPort"]: l["PolicyNames"] for l in listener_descriptions}
- return policies
-
def _set_listener_policies(self, port, policies):
self.changed = True
if self.check_mode:
@@ -1705,7 +1697,7 @@ class ElbManager:
proxy_protocol = listener.get("proxy_protocol", None)
# Only look at the listeners for which proxy_protocol is defined
if proxy_protocol is None:
- next
+ continue
instance_port = listener.get("instance_port")
if proxy_ports.get(instance_port, None) is not None:
if proxy_ports[instance_port] != proxy_protocol:
@@ -1725,10 +1717,10 @@ class ElbManager:
if any(proxy_ports.values()):
changed |= self._set_proxy_protocol_policy(proxy_policy_name)
- for port in proxy_ports:
+ for port, port_policy in proxy_ports.items():
current_policies = set(backend_policies.get(port, []))
new_policies = list(current_policies - proxy_policies)
- if proxy_ports[port]:
+ if port_policy:
new_policies.append(proxy_policy_name)
changed |= self._set_backend_policy(port, new_policies)
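
For context on the proxy-protocol loop reworked above, a hedged task sketch; the load balancer name, zone and ports are illustrative only:

    - name: Classic ELB with proxy protocol enabled on the backend listener
      amazon.aws.elb_classic_lb:
        name: example-elb                      # placeholder load balancer name
        state: present
        zones:
          - us-east-1a                         # placeholder availability zone
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 8080
            proxy_protocol: true
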
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
index fb2d98e08..0a654dec5 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
@@ -340,7 +340,7 @@ def main():
"The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are"
" returned for now."
),
- date="2024-08-01",
+ version="9.0.0",
collection_name="amazon.aws",
)
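
Since the deprecation above is now tied to release 9.0.0, playbooks should read policy_names rather than policies; a minimal sketch with placeholder names and a placeholder policy file:

    - name: Attach an inline policy and read back the policy names
      amazon.aws.iam_policy:
        iam_type: role
        iam_name: example-role                 # placeholder role name
        policy_name: example-policy            # placeholder policy name
        policy_json: "{{ lookup('file', 'policy.json') }}"   # placeholder policy document
        state: present
      register: out

    - ansible.builtin.debug:
        var: out.policy_names
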
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role.py b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
index a7da38c31..3262a7a92 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_role.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
@@ -174,8 +174,8 @@ iam_role:
description:
- the policy that grants an entity permission to assume the role
- |
- note: the case of keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change
+      Note: the case of keys in this dictionary is no longer converted from CamelCase to
+ snake_case. This behaviour changed in release 8.0.0.
type: dict
returned: always
sample: {
@@ -192,23 +192,14 @@ iam_role:
'version': '2012-10-17'
}
assume_role_policy_document_raw:
- description: the policy that grants an entity permission to assume the role
+ description:
+ - |
+ Note: this return value has been deprecated and will be removed in a release after
+ 2026-05-01. assume_role_policy_document and assume_role_policy_document_raw now use
+ the same format.
type: dict
returned: always
version_added: 5.3.0
- sample: {
- 'Statement': [
- {
- 'Action': 'sts:AssumeRole',
- 'Effect': 'Allow',
- 'Principal': {
- 'Service': 'ec2.amazonaws.com'
- },
- 'Sid': ''
- }
- ],
- 'Version': '2012-10-17'
- }
attached_policies:
description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
@@ -504,7 +495,7 @@ def create_or_update_role(module, client):
role["AttachedPolicies"] = list_iam_role_attached_policies(client, role_name)
camel_role = normalize_iam_role(role, _v7_compat=True)
- module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
+ module.exit_json(changed=changed, iam_role=camel_role)
def create_instance_profiles(client, check_mode, role_name, path):
@@ -658,17 +649,10 @@ def main():
)
module.deprecate(
- "All return values other than iam_role and changed have been deprecated and "
- "will be removed in a release after 2023-12-01.",
- date="2023-12-01",
- collection_name="amazon.aws",
- )
- module.deprecate(
- "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- "iam_role.assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01",
+ "In a release after 2026-05-01 iam_role.assume_role_policy_document_raw "
+ "will no longer be returned. Since release 8.0.0 assume_role_policy_document "
+ "has been returned with the same format as iam_role.assume_role_policy_document_raw",
+ date="2026-05-01",
collection_name="amazon.aws",
)
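
Given the consolidation above (assume_role_policy_document now carries the raw CamelCase document and is the only role payload returned), a hedged usage sketch; the role name and policy file are placeholders:

    - name: Create a role and read its trust policy in the 8.0.0 format
      amazon.aws.iam_role:
        name: example-role                     # placeholder role name
        assume_role_policy_document: "{{ lookup('file', 'assume_policy.json') }}"   # placeholder document
        state: present
      register: role_out

    - ansible.builtin.debug:
        var: role_out.iam_role.assume_role_policy_document.Statement   # CamelCase keys since 8.0.0
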
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
index e77689878..fb4a06466 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
@@ -67,12 +67,16 @@ iam_roles:
description:
- The policy that grants an entity permission to assume the role
- |
- Note: the case of keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change.
+      Note: the case of keys in this dictionary is no longer converted from CamelCase to
+ snake_case. This behaviour changed in release 8.0.0.
returned: always
type: dict
assume_role_policy_document_raw:
- description: The policy document describing what can assume the role.
+ description:
+ - |
+ Note: this return value has been deprecated and will be removed in a release after
+ 2026-05-01. assume_role_policy_document and assume_role_policy_document_raw now use
+ the same format.
returned: always
type: dict
version_added: 5.3.0
@@ -208,11 +212,10 @@ def main():
path_prefix = module.params["path_prefix"]
module.deprecate(
- "In a release after 2023-12-01 the contents of assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- ".assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01",
+ "In a release after 2026-05-01 iam_role.assume_role_policy_document_raw "
+ "will no longer be returned. Since release 8.0.0 assume_role_policy_document "
+ "has been returned with the same format as iam_role.assume_role_policy_document_raw",
+ date="2026-05-01",
collection_name="amazon.aws",
)
@@ -226,10 +229,10 @@ def main():
if validation_error:
_prefix = "/" if not path_prefix.startswith("/") else ""
_suffix = "/" if not path_prefix.endswith("/") else ""
- path_prefix = "{_prefix}{path_prefix}{_suffix}"
+ path_prefix = f"{_prefix}{path_prefix}{_suffix}"
module.deprecate(
"In a release after 2026-05-01 paths must begin and end with /. "
- "path_prefix has been modified to '{path_prefix}'",
+ f"path_prefix has been modified to '{path_prefix}'",
date="2026-05-01",
collection_name="amazon.aws",
)
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
index 82f73b370..47e52978d 100644
--- a/ansible_collections/amazon/aws/plugins/modules/kms_key.py
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
@@ -156,6 +156,7 @@ notes:
This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata
shortly after modifying keys.
For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata.
+ - The C(policies) return key was removed in amazon.aws release 8.0.0.
"""
EXAMPLES = r"""
@@ -281,41 +282,6 @@ aliases:
sample:
- aws/acm
- aws/ebs
-policies:
- description: List of policy documents for the key. Empty when access is denied even if there are policies.
- type: list
- returned: always
- elements: str
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "123456789012"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::123456789012:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
key_policies:
description: List of policy documents for the key. Empty when access is denied even if there are policies.
type: list
@@ -435,14 +401,6 @@ multi_region:
sample: False
"""
-# these mappings are used to go from simple labels to the actual 'Sid' values returned
-# by get_policy. They seem to be magic values.
-statement_label = {
- "role": "Allow use of the key",
- "role grant": "Allow attachment of persistent resources",
- "admin": "Allow access for Key Administrators",
-}
-
import json
try:
@@ -462,12 +420,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
-def get_iam_roles_with_backoff(connection):
- paginator = connection.get_paginator("list_roles")
- return paginator.paginate().build_full_result()
-
-
-@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
paginator = connection.get_paginator("list_keys")
return paginator.paginate().build_full_result()
@@ -598,20 +550,11 @@ def get_key_details(connection, module, key_id):
module.fail_json_aws(e, msg="Failed to obtain key grants")
tags = get_kms_tags(connection, module, key_id)
result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue")
- result["policies"] = get_kms_policies(connection, module, key_id)
- result["key_policies"] = [json.loads(policy) for policy in result["policies"]]
+ policies = get_kms_policies(connection, module, key_id)
+ result["key_policies"] = [json.loads(policy) for policy in policies]
return result
-def get_kms_facts(connection, module):
- try:
- keys = get_kms_keys_with_backoff(connection)["Keys"]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain keys")
-
- return [get_key_details(connection, module, key["KeyId"]) for key in keys]
-
-
def convert_grant_params(grant, key):
grant_params = dict(KeyId=key["key_arn"], GranteePrincipal=grant["grantee_principal"])
if grant.get("operations"):
@@ -947,13 +890,6 @@ def delete_key(connection, module, key_metadata):
return result
-def get_arn_from_role_name(iam, rolename):
- ret = iam.get_role(RoleName=rolename)
- if ret.get("Role") and ret["Role"].get("Arn"):
- return ret["Role"]["Arn"]
- raise Exception(f"could not find arn for name {rolename}.")
-
-
def canonicalize_alias_name(alias):
if alias is None:
return None
@@ -1037,15 +973,6 @@ def main():
kms = module.client("kms")
- module.deprecate(
- (
- "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned"
- " for now."
- ),
- date="2024-05-01",
- collection_name="amazon.aws",
- )
-
key_metadata = fetch_key_metadata(kms, module, module.params.get("key_id"), module.params.get("alias"))
validate_params(module, key_metadata)
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
index 4ba249940..6f0eb2f4b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
@@ -49,6 +49,8 @@ options:
description: Whether to get full details (tags, grants etc.) of keys pending deletion.
default: False
type: bool
+notes:
+ - The C(policies) return key was removed in amazon.aws release 8.0.0.
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
@@ -154,41 +156,6 @@ kms_keys:
sample:
Name: myKey
Purpose: protecting_stuff
- policies:
- description: List of policy documents for the key. Empty when access is denied even if there are policies.
- type: list
- returned: always
- elements: str
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "123456789012"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::123456789012:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
key_policies:
description: List of policy documents for the key. Empty when access is denied even if there are policies.
type: list
@@ -480,8 +447,8 @@ def get_key_details(connection, module, key_id, tokens=None):
result = camel_dict_to_snake_dict(result)
result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue")
- result["policies"] = get_kms_policies(connection, module, key_id)
- result["key_policies"] = [json.loads(policy) for policy in result["policies"]]
+ policies = get_kms_policies(connection, module, key_id)
+ result["key_policies"] = [json.loads(policy) for policy in policies]
return result
@@ -523,15 +490,6 @@ def main():
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to connect to AWS")
- module.deprecate(
- (
- "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned"
- " for now."
- ),
- date="2024-05-01",
- collection_name="amazon.aws",
- )
-
all_keys = get_kms_info(connection, module)
filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params["filters"])]
ret_params = dict(kms_keys=filtered_keys)
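
With the policies return key removed in 8.0.0 (see the note added above), consumers should switch to key_policies; a minimal sketch with a placeholder alias:

    - name: Describe a KMS key and inspect its key policy
      amazon.aws.kms_key_info:
        alias: example-key                     # placeholder key alias
      register: kms

    - ansible.builtin.debug:
        var: kms.kms_keys[0].key_policies
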
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
index c916ae8e8..424ad5abe 100644
--- a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
@@ -54,22 +54,28 @@ options:
type: str
source_params:
description:
- - Sub-parameters required for event source.
+ - Sub-parameters required for event source.
suboptions:
source_arn:
description:
- - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
+ - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
type: str
required: true
enabled:
description:
- - Indicates whether AWS Lambda should begin polling or readin from the event source.
+            - Indicates whether AWS Lambda should begin polling or reading from the event source.
default: true
type: bool
batch_size:
description:
- - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
- default: 100
+ - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
+ - Amazon Kinesis - Default V(100). Max V(10000).
+ - Amazon DynamoDB Streams - Default V(100). Max V(10000).
+ - Amazon Simple Queue Service - Default V(10). For standard queues the max is V(10000). For FIFO queues the max is V(10).
+ - Amazon Managed Streaming for Apache Kafka - Default V(100). Max V(10000).
+            - Self-managed Apache Kafka - Default V(100). Max V(10000).
+ - Amazon MQ (ActiveMQ and RabbitMQ) - Default V(100). Max V(10000).
+ - DocumentDB - Default V(100). Max V(10000).
type: int
starting_position:
description:
@@ -84,6 +90,15 @@ options:
elements: str
choices: [ReportBatchItemFailures]
version_added: 5.5.0
+ maximum_batching_window_in_seconds:
+ description:
+ - The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.
+ - You can configure O(source_params.maximum_batching_window_in_seconds) to any value from V(0) seconds to V(300) seconds in increments of seconds.
+ - For streams and Amazon SQS event sources, when O(source_params.batch_size) is set to a value greater than V(10),
+ O(source_params.maximum_batching_window_in_seconds) defaults to V(1).
+ - O(source_params.maximum_batching_window_in_seconds) is not supported by FIFO queues.
+ type: int
+ version_added: 8.0.0
required: true
type: dict
extends_documentation_fragment:
@@ -135,9 +150,11 @@ lambda_stream_events:
type: list
"""
+import copy
import re
try:
+ from botocore.exceptions import BotoCoreError
from botocore.exceptions import ClientError
from botocore.exceptions import MissingParametersError
from botocore.exceptions import ParamValidationError
@@ -146,9 +163,9 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto3_conn
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
# ---------------------------------------------------------------------------------------------------
#
@@ -157,122 +174,47 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
# ---------------------------------------------------------------------------------------------------
-class AWSConnection:
- """
- Create the connection object and client objects as required.
- """
-
- def __init__(self, ansible_obj, resources, use_boto3=True):
- try:
- self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
-
- self.resource_client = dict()
- if not resources:
- resources = ["lambda"]
-
- resources.append("iam")
-
- for resource in resources:
- aws_connect_kwargs.update(
- dict(region=self.region, endpoint=self.endpoint, conn_type="client", resource=resource)
- )
- self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
-
- # if region is not provided, then get default profile/session region
- if not self.region:
- self.region = self.resource_client["lambda"].meta.region_name
-
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- ansible_obj.fail_json(msg=f"Unable to connect, authorize or access resource: {e}")
-
- # set account ID
- try:
- self.account_id = self.resource_client["iam"].get_user()["User"]["Arn"].split(":")[4]
- except (ClientError, ValueError, KeyError, IndexError):
- self.account_id = ""
-
- def client(self, resource="lambda"):
- return self.resource_client[resource]
-
-
-def pc(key):
- """
- Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
-
- :param key:
- :return:
- """
-
- return "".join([token.capitalize() for token in key.split("_")])
-
-
-def ordered_obj(obj):
- """
- Order object for comparison purposes
-
- :param obj:
- :return:
- """
-
- if isinstance(obj, dict):
- return sorted((k, ordered_obj(v)) for k, v in obj.items())
- if isinstance(obj, list):
- return sorted(ordered_obj(x) for x in obj)
- else:
- return obj
-
-
-def set_api_sub_params(params):
- """
- Sets module sub-parameters to those expected by the boto3 API.
-
- :param params:
- :return:
- """
-
- api_params = dict()
-
- for param in params.keys():
- param_value = params.get(param, None)
- if param_value:
- api_params[pc(param)] = param_value
-
- return api_params
-
-
-def validate_params(module, aws):
+def validate_params(module, client):
"""
Performs basic parameter validation.
- :param module:
- :param aws:
+ :param module: The AnsibleAWSModule object
+ :param client: The client used to perform requests to AWS
:return:
"""
function_name = module.params["lambda_function_arn"]
+ qualifier = get_qualifier(module)
# validate function name
if not re.search(r"^[\w\-:]+$", function_name):
module.fail_json(
msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.",
)
- if len(function_name) > 64 and not function_name.startswith("arn:aws:lambda:"):
- module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit')
- elif len(function_name) > 140 and function_name.startswith("arn:aws:lambda:"):
- module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit')
-
- # check if 'function_name' needs to be expanded in full ARN format
- if not module.params["lambda_function_arn"].startswith("arn:aws:lambda:"):
- function_name = module.params["lambda_function_arn"]
- module.params["lambda_function_arn"] = f"arn:aws:lambda:{aws.region}:{aws.account_id}:function:{function_name}"
-
- qualifier = get_qualifier(module)
- if qualifier:
- function_arn = module.params["lambda_function_arn"]
- module.params["lambda_function_arn"] = f"{function_arn}:{qualifier}"
+    # lambda_function_arn contains only the function name (not the ARN)
+ if not function_name.startswith("arn:aws:lambda:"):
+ if len(function_name) > 64:
+ module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit')
+ try:
+ params = {"FunctionName": function_name}
+ if qualifier:
+ params["Qualifier"] = qualifier
+ response = client.get_function(**params)
+ module.params["lambda_function_arn"] = response["Configuration"]["FunctionArn"]
+ except is_boto3_error_code("ResourceNotFoundException"):
+ msg = f"An error occurred: The function '{function_name}' does not exist."
+ if qualifier:
+ msg = f"An error occurred: The function '{function_name}' (qualifier={qualifier}) does not exist."
+ module.fail_json(msg=msg)
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json(msg=f"An error occurred while trying to describe function '{function_name}': {e}")
+ else:
+ if len(function_name) > 140:
+ module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit')
- return
+ if qualifier:
+ module.params["lambda_function_arn"] = f"{function_name}:{qualifier}"
def get_qualifier(module):
@@ -302,7 +244,38 @@ def get_qualifier(module):
# ---------------------------------------------------------------------------------------------------
-def lambda_event_stream(module, aws):
+def set_default_values(module, source_params):
+ _source_params_cpy = copy.deepcopy(source_params)
+
+ if module.params["event_source"].lower() == "sqs":
+ # Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
+ _source_params_cpy.setdefault("batch_size", 10)
+
+ if source_params["source_arn"].endswith(".fifo"):
+ if _source_params_cpy["batch_size"] > 10:
+ module.fail_json(msg="For FIFO queues the maximum batch_size is 10.")
+ if _source_params_cpy.get("maximum_batching_window_in_seconds"):
+ module.fail_json(
+ msg="maximum_batching_window_in_seconds is not supported by Amazon SQS FIFO event sources."
+ )
+ else:
+ if _source_params_cpy["batch_size"] >= 10000:
+                module.fail_json(msg="For standard queues batch_size must be lower than 10000.")
+
+ elif module.params["event_source"].lower() == "stream":
+ # Default 100.
+ _source_params_cpy.setdefault("batch_size", 100)
+
+ if not (100 <= _source_params_cpy["batch_size"] <= 10000):
+ module.fail_json(msg="batch_size for streams must be between 100 and 10000")
+
+ if _source_params_cpy["batch_size"] > 10 and not _source_params_cpy.get("maximum_batching_window_in_seconds"):
+ _source_params_cpy["maximum_batching_window_in_seconds"] = 1
+
+ return _source_params_cpy
+
+
+def lambda_event_stream(module, client):
"""
Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
:param module:
@@ -310,7 +283,6 @@ def lambda_event_stream(module, aws):
:return:
"""
- client = aws.client("lambda")
facts = dict()
changed = False
current_state = "absent"
@@ -327,15 +299,8 @@ def lambda_event_stream(module, aws):
else:
module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
- # check if optional sub-parameters are valid, if present
- batch_size = source_params.get("batch_size")
- if batch_size:
- try:
- source_params["batch_size"] = int(batch_size)
- except ValueError:
- module.fail_json(
- msg=f"Source parameter 'batch_size' must be an integer, found: {source_params['batch_size']}"
- )
+ if state == "present":
+ source_params = set_default_values(module, source_params)
# optional boolean value needs special treatment as not present does not imply False
source_param_enabled = module.boolean(source_params.get("enabled", "True"))
@@ -351,18 +316,21 @@ def lambda_event_stream(module, aws):
if state == "present":
if current_state == "absent":
starting_position = source_params.get("starting_position")
- if starting_position:
+ event_source = module.params.get("event_source")
+ if event_source == "stream":
+ if not starting_position:
+ module.fail_json(
+ msg="Source parameter 'starting_position' is required for stream event notification."
+ )
api_params.update(StartingPosition=starting_position)
- elif module.params.get("event_source") == "sqs":
- # starting position is not required for SQS
- pass
- else:
- module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
-
- if source_arn:
- api_params.update(Enabled=source_param_enabled)
+
+ api_params.update(Enabled=source_param_enabled)
if source_params.get("batch_size"):
api_params.update(BatchSize=source_params.get("batch_size"))
+ if source_params.get("maximum_batching_window_in_seconds"):
+ api_params.update(
+ MaximumBatchingWindowInSeconds=source_params.get("maximum_batching_window_in_seconds")
+ )
if source_params.get("function_response_types"):
api_params.update(FunctionResponseTypes=source_params.get("function_response_types"))
@@ -375,9 +343,8 @@ def lambda_event_stream(module, aws):
else:
# current_state is 'present'
- api_params = dict(FunctionName=module.params["lambda_function_arn"])
current_mapping = facts[0]
- api_params.update(UUID=current_mapping["UUID"])
+ api_params = dict(FunctionName=module.params["lambda_function_arn"], UUID=current_mapping["UUID"])
mapping_changed = False
# check if anything changed
@@ -426,7 +393,18 @@ def main():
state=dict(required=False, default="present", choices=["present", "absent"]),
lambda_function_arn=dict(required=True, aliases=["function_name", "function_arn"]),
event_source=dict(required=False, default="stream", choices=source_choices),
- source_params=dict(type="dict", required=True),
+ source_params=dict(
+ type="dict",
+ required=True,
+ options=dict(
+ source_arn=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ batch_size=dict(type="int"),
+ starting_position=dict(type="str", choices=["TRIM_HORIZON", "LATEST"]),
+ function_response_types=dict(type="list", elements="str", choices=["ReportBatchItemFailures"]),
+ maximum_batching_window_in_seconds=dict(type="int"),
+ ),
+ ),
alias=dict(required=False, default=None),
version=dict(type="int", required=False, default=0),
)
@@ -438,12 +416,15 @@ def main():
required_together=[],
)
- aws = AWSConnection(module, ["lambda"])
+ try:
+ client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff())
+ except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
- validate_params(module, aws)
+ validate_params(module, client)
if module.params["event_source"].lower() in ("stream", "sqs"):
- results = lambda_event_stream(module, aws)
+ results = lambda_event_stream(module, client)
else:
module.fail_json(msg="Please select `stream` or `sqs` as the event type")
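
Tying the new source_params specification together, a hedged sketch of an SQS event-source mapping that uses the 8.0.0 batching option; the function name and queue ARN are placeholders:

    - name: Map an SQS queue to a Lambda function with tuned batching
      amazon.aws.lambda_event:
        state: present
        event_source: sqs
        function_name: example-function        # placeholder function name
        source_params:
          source_arn: arn:aws:sqs:us-east-1:123456789012:example-queue   # placeholder queue ARN
          enabled: true
          batch_size: 100
          maximum_batching_window_in_seconds: 5
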
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
index 83ba4feaa..fbd443bb7 100644
--- a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
@@ -95,7 +95,7 @@ functions:
elements: str
architectures:
description: The architectures supported by the function.
- returned: successful run where botocore >= 1.21.51
+ returned: success
type: list
elements: str
sample: ['arm64']
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
index 0e5634e59..30a7145e7 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
@@ -170,7 +170,6 @@ options:
- For the full list of DB instance classes and availability for your engine visit
U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html).
- This setting is required to create a Multi-AZ DB cluster.
- - I(db_cluster_instance_class) require botocore >= 1.23.44.
type: str
version_added: 5.5.0
enable_iam_database_authentication:
@@ -182,7 +181,6 @@ options:
description:
- The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
- This setting is required to create a Multi-AZ DB cluster.
- - I(allocated_storage) require botocore >= 1.23.44.
type: int
version_added: 5.5.0
storage_type:
@@ -190,7 +188,6 @@ options:
- Specifies the storage type to be associated with the DB cluster.
- This setting is required to create a Multi-AZ DB cluster.
- When specified, a value for the I(iops) parameter is required.
- - I(storage_type) require botocore >= 1.23.44.
- Defaults to C(io1).
type: str
choices:
@@ -201,7 +198,6 @@ options:
- The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.
- This setting is required to create a Multi-AZ DB cluster
- Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
- - I(iops) require botocore >= 1.23.44.
type: int
version_added: 5.5.0
engine:
@@ -1174,7 +1170,7 @@ def ensure_present(cluster, parameters, method_name, method_options_name):
return changed
-def handle_remove_from_global_db(module, cluster):
+def handle_remove_from_global_db(cluster):
global_cluster_id = module.params.get("global_cluster_identifier")
db_cluster_id = module.params.get("db_cluster_identifier")
db_cluster_arn = cluster["DBClusterArn"]
@@ -1361,7 +1357,7 @@ def main():
if method_name == "delete_db_cluster":
if cluster and module.params.get("remove_from_global_db"):
if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]:
- changed = handle_remove_from_global_db(module, cluster)
+ changed = handle_remove_from_global_db(cluster)
call_method(client, module, method_name, eval(method_options_name)(parameters))
changed = True
@@ -1377,7 +1373,7 @@ def main():
if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]:
if changed:
wait_for_cluster_status(client, module, cluster_id, "cluster_available")
- changed |= handle_remove_from_global_db(module, cluster)
+ changed |= handle_remove_from_global_db(cluster)
result = camel_dict_to_snake_dict(get_cluster(cluster_id))
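
For the handle_remove_from_global_db() path adjusted above, a hedged task sketch; both cluster identifiers are placeholders:

    - name: Detach an Aurora cluster from its global database and delete it
      amazon.aws.rds_cluster:
        db_cluster_identifier: example-cluster         # placeholder cluster identifier
        global_cluster_identifier: example-global-db   # placeholder global cluster identifier
        remove_from_global_db: true
        state: absent
        skip_final_snapshot: true
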
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py
new file mode 100644
index 000000000..dc94bca1a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: rds_cluster_param_group
+version_added: 7.6.0
+short_description: Manage RDS cluster parameter groups
+description:
+ - Creates, modifies, and deletes RDS cluster parameter groups.
+options:
+ state:
+ description:
+ - Specifies whether the RDS cluster parameter group should be present or absent.
+ default: present
+    choices: ['present', 'absent']
+ type: str
+ name:
+ description:
+ - The name of the RDS cluster parameter group to create, modify or delete.
+ required: true
+ type: str
+ description:
+ description:
+ - The description for the RDS cluster parameter group.
+ - Required for O(state=present).
+ type: str
+ db_parameter_group_family:
+ description:
+ - The RDS cluster parameter group family name.
+ - An RDS cluster parameter group can be associated with one and only one RDS cluster parameter group family,
+ and can be applied only to an RDS cluster running a database engine and engine version compatible with that RDS cluster parameter group family.
+ - Please use the M(amazon.aws.rds_engine_versions_info) module to list all of the available parameter group families for a DB engine.
+ - The RDS cluster parameter group family is immutable and can't be changed when updating a RDS cluster parameter group.
+ See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbclusterparametergroup.html)
+ - Required for O(state=present).
+ type: str
+ parameters:
+ description:
+ - A list of parameters to update.
+ type: list
+ elements: dict
+ suboptions:
+ parameter_name:
+ description: Specifies the name of the parameter.
+ type: str
+ required: true
+ parameter_value:
+ description:
+ - Specifies the value of the parameter.
+ type: str
+ required: true
+ apply_method:
+ description:
+ - Indicates when to apply parameter updates.
+ choices:
+ - immediate
+ - pending-reboot
+ type: str
+ required: true
+author:
+ - "Aubin Bikouo (@abikouo)"
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Add or change a parameter group, in this case setting authentication_timeout to 200
+ amazon.aws.rds_cluster_param_group:
+ state: present
+ name: test-cluster-group
+ description: 'My test RDS cluster group'
+ db_parameter_group_family: 'mysql5.6'
+ parameters:
+ - parameter_name: authentication_timeout
+ parameter_value: "200"
+ apply_method: immediate
+ tags:
+ Environment: production
+ Application: parrot
+
+- name: Remove a parameter group
+ amazon.aws.rds_cluster_param_group:
+ state: absent
+ name: test-cluster-group
+"""
+
+RETURN = r"""
+db_cluster_parameter_group:
+ description: Dictionary containing the RDS cluster parameter group information.
+ returned: success
+ type: complex
+ contains:
+ db_cluster_parameter_group_arn:
+ description: The Amazon Resource Name (ARN) for the RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ db_cluster_parameter_group_name:
+ description: The name of the RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ db_parameter_group_family:
+ description: The name of the RDS parameter group family that this RDS cluster parameter group is compatible with.
+ type: str
+ returned: when state is present
+ description:
+ description: Provides the customer-specified description for this RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ tags:
+ description: Dictionary of tags.
+ type: dict
+ returned: when state is present
+"""
+
+from itertools import zip_longest
+from typing import Any
+from typing import Dict
+from typing import List
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameter_groups
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+
+def modify_parameters(
+ module: AnsibleAWSModule, connection: Any, group_name: str, parameters: List[Dict[str, Any]]
+) -> bool:
+ current_params = describe_db_cluster_parameters(module, connection, group_name)
+ parameters = snake_dict_to_camel_dict(parameters, capitalize_first=True)
+ # compare the current resource parameters with the values requested in the module parameters
+ changed = False
+ for param in parameters:
+ found = False
+ for current_p in current_params:
+ if param.get("ParameterName") == current_p.get("ParameterName"):
+ found = True
+ if not current_p["IsModifiable"]:
+ module.fail_json(msg=f"The parameter {param.get('ParameterName')} cannot be modified")
+ changed |= any((current_p.get(k) != v for k, v in param.items()))
+ if not found:
+ module.fail_json(msg=f"Could not find parameter with name: {param.get('ParameterName')}")
+ if changed:
+ if not module.check_mode:
+ # The modify_db_cluster_parameter_group() API accepts
+ # a maximum of 20 parameters in a single request,
+ # so the parameters are submitted in chunks of at most 20 items
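+ # e.g. zip_longest(*[iter("abcde")] * 2, fillvalue=None) yields
+ # ("a", "b"), ("c", "d"), ("e", None); the None fillers are dropped
+ # below so each request carries only real parameters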
+ for chunk in zip_longest(*[iter(parameters)] * 20, fillvalue=None):
+ non_empty_chunk = [item for item in chunk if item]
+ try:
+ connection.modify_db_cluster_parameter_group(
+ aws_retry=True, DBClusterParameterGroupName=group_name, Parameters=non_empty_chunk
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update RDS cluster parameters")
+ return changed
+
+
+def ensure_present(module: AnsibleAWSModule, connection: Any) -> None:
+ group_name = module.params["name"]
+ db_parameter_group_family = module.params["db_parameter_group_family"]
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ changed = False
+
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group_name)
+ if not response:
+ # Create RDS cluster parameter group
+ params = dict(
+ DBClusterParameterGroupName=group_name,
+ DBParameterGroupFamily=db_parameter_group_family,
+ Description=module.params["description"],
+ )
+ if tags:
+ params["Tags"] = ansible_dict_to_boto3_tag_list(tags)
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Would have create RDS parameter group if not in check mode.")
+ try:
+ response = connection.create_db_cluster_parameter_group(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create parameter group")
+ else:
+ group = response[0]
+ if db_parameter_group_family != group["DBParameterGroupFamily"]:
+ module.warn(
+ "The RDS cluster parameter group family is immutable and can't be changed when updating a RDS cluster parameter group."
+ )
+
+ if tags:
+ existing_tags = get_tags(connection, module, group["DBClusterParameterGroupArn"])
+ changed = ensure_tags(
+ connection, module, group["DBClusterParameterGroupArn"], existing_tags, tags, purge_tags
+ )
+
+ parameters = module.params.get("parameters")
+ if parameters:
+ changed |= modify_parameters(module, connection, group_name, parameters)
+
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group_name)
+ group = camel_dict_to_snake_dict(response[0])
+ group["tags"] = get_tags(connection, module, group["db_cluster_parameter_group_arn"])
+
+ module.exit_json(changed=changed, db_cluster_parameter_group=group)
+
+
+def ensure_absent(module: AnsibleAWSModule, connection: Any) -> None:
+ group = module.params["name"]
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group)
+ if not response:
+ module.exit_json(changed=False, msg="The RDS cluster parameter group does not exist.")
+
+ if not module.check_mode:
+ try:
+ response = connection.delete_db_cluster_parameter_group(aws_retry=True, DBClusterParameterGroupName=group)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete RDS cluster parameter group")
+ module.exit_json(changed=True)
+
+
+def main() -> None:
+ argument_spec = dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(required=True),
+ db_parameter_group_family=dict(),
+ description=dict(),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ parameters=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ parameter_name=dict(required=True),
+ parameter_value=dict(required=True),
+ apply_method=dict(choices=["immediate", "pending-reboot"], required=True),
+ ),
+ ),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[["state", "present", ["description", "db_parameter_group_family"]]],
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ if module.params.get("state") == "present":
+ ensure_present(module=module, connection=connection)
+ else:
+ ensure_absent(module=module, connection=connection)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py
new file mode 100644
index 000000000..bad0433a7
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Aubin Bikouo (@abikouo)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+module: rds_cluster_param_group_info
+version_added: 7.6.0
+short_description: Describes the properties of RDS cluster parameter groups
+description:
+ - Obtain information about all RDS cluster parameter groups or about one specific RDS cluster parameter group.
+options:
+ name:
+ description:
+ - The RDS cluster parameter group name.
+ type: str
+ include_parameters:
+ description:
+ - Specifies whether to include the detailed parameters of the RDS cluster parameter group.
+ - V(all) includes all parameters.
+ - V(engine-default) includes engine-default parameters.
+ - V(system) includes system parameters.
+ - V(user) includes user parameters.
+ type: str
+ choices:
+ - all
+ - engine-default
+ - system
+ - user
+author:
+ - Aubin Bikouo (@abikouo)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Describe a specific RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: myrdsclustergroup
+
+- name: Describe all RDS cluster parameter groups
+ amazon.aws.rds_cluster_param_group_info:
+
+- name: Describe a specific RDS cluster parameter group including user parameters
+ amazon.aws.rds_cluster_param_group_info:
+ name: myrdsclustergroup
+ include_parameters: user
+"""
+
+RETURN = r"""
+db_cluster_parameter_groups:
+ description: List of RDS cluster parameter groups.
+ returned: always
+ type: list
+ contains:
+ db_cluster_parameter_group_name:
+ description:
+ - The name of the RDS cluster parameter group.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of the RDS parameter group family that this RDS cluster parameter group is compatible with.
+ type: str
+ description:
+ description:
+ - Provides the customer-specified description for this RDS cluster parameter group.
+ type: str
+ db_cluster_parameter_group_arn:
+ description:
+ - The Amazon Resource Name (ARN) for the RDS cluster parameter group.
+ type: str
+ db_parameters:
+ description:
+ - Provides a list of parameters for the RDS cluster parameter group.
+ returned: When O(include_parameters) is set
+ type: list
+ elements: dict
+ sample: [
+ {
+ "allowed_values": "1-600",
+ "apply_method": "pending-reboot",
+ "apply_type": "dynamic",
+ "data_type": "integer",
+ "description": "(s) Sets the maximum allowed time to complete client authentication.",
+ "is_modifiable": true,
+ "parameter_name": "authentication_timeout",
+ "parameter_value": "100",
+ "source": "user",
+ "supported_engine_modes": [
+ "provisioned"
+ ]
+ }
+ ]
+ tags:
+ description: A dictionary of key value pairs.
+ type: dict
+ sample: {
+ "Name": "rds-cluster-demo"
+ }
+"""
+
+from typing import Any
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameter_groups
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
+def describe_rds_cluster_parameter_group(connection: Any, module: AnsibleAWSModule) -> None:
+ group_name = module.params.get("name")
+ include_parameters = module.params.get("include_parameters")
+ results = []
+ response = describe_db_cluster_parameter_groups(module, connection, group_name)
+ if response:
+ for resource in response:
+ resource["tags"] = get_tags(connection, module, resource["DBClusterParameterGroupArn"])
+ if include_parameters is not None:
+ resource["db_parameters"] = describe_db_cluster_parameters(
+ module, connection, resource["DBClusterParameterGroupName"], include_parameters
+ )
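+ # ignore_list keeps the user-defined tag keys from being snake_cased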
+ results.append(camel_dict_to_snake_dict(resource, ignore_list=["tags"]))
+ module.exit_json(changed=False, db_cluster_parameter_groups=results)
+
+
+def main() -> None:
+ argument_spec = dict(
+ name=dict(),
+ include_parameters=dict(choices=["user", "all", "system", "engine-default"]),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ describe_rds_cluster_parameter_group(client, module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py
new file mode 100644
index 000000000..c2391946c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Aubin Bikouo (@abikouo)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+module: rds_engine_versions_info
+version_added: 7.6.0
+short_description: Describes the properties of specific versions of DB engines.
+description:
+ - Obtain information about specific versions of DB engines.
+options:
+ engine:
+ description:
+ - The database engine to return version details for.
+ type: str
+ choices:
+ - aurora-mysql
+ - aurora-postgresql
+ - custom-oracle-ee
+ - db2-ae
+ - db2-se
+ - mariadb
+ - mysql
+ - oracle-ee
+ - oracle-ee-cdb
+ - oracle-se2
+ - oracle-se2-cdb
+ - postgres
+ - sqlserver-ee
+ - sqlserver-se
+ - sqlserver-ex
+ - sqlserver-web
+ engine_version:
+ description:
+ - A specific database engine version to return details for.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of a specific RDS parameter group family to return details for.
+ type: str
+ default_only:
+ description:
+ - Specifies whether to return only the default version of the specified engine
+ or the engine and major version combination.
+ type: bool
+ default: False
+ filters:
+ description:
+ - A filter that specifies one or more DB engine versions to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBEngineVersions.html).
+ type: dict
+author:
+ - Aubin Bikouo (@abikouo)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: List all of the available parameter group families for the Aurora PostgreSQL DB engine
+ amazon.aws.rds_engine_versions_info:
+ engine: aurora-postgresql
+
+- name: List all of the available parameter group families for the Aurora PostgreSQL DB engine on a specific version
+ amazon.aws.rds_engine_versions_info:
+ engine: aurora-postgresql
+ engine_version: 16.1
+
+- name: Get default engine version for DB parameter group family postgres16
+ amazon.aws.rds_engine_versions_info:
+ engine: postgres
+ default_only: true
+ db_parameter_group_family: postgres16
+"""
+
+RETURN = r"""
+db_engine_versions:
+ description: List of RDS engine versions.
+ returned: always
+ type: list
+ contains:
+ engine:
+ description:
+ - The name of the database engine.
+ type: str
+ engine_version:
+ description:
+ - The version number of the database engine.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of the DB parameter group family for the database engine.
+ type: str
+ db_engine_description:
+ description:
+ - The description of the database engine.
+ type: str
+ db_engine_version_description:
+ description:
+ - The description of the database engine version.
+ type: str
+ default_character_set:
+ description:
+ - The default character set for new instances of this engine version.
+ type: dict
+ sample: {
+ "character_set_description": "Unicode 5.0 UTF-8 Universal character set",
+ "character_set_name": "AL32UTF8"
+ }
+ image:
+ description:
+ - The EC2 image.
+ type: complex
+ contains:
+ image_id:
+ description:
+ - A value that indicates the ID of the AMI.
+ type: str
+ status:
+ description:
+ - A value that indicates the status of a custom engine version (CEV).
+ type: str
+ db_engine_media_type:
+ description:
+ - A value that indicates the source media provider of the AMI based on the usage operation.
+ type: str
+ supported_character_sets:
+ description:
+ - A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance operation.
+ type: list
+ elements: dict
+ contains:
+ character_set_name:
+ description:
+ - The name of the character set.
+ type: str
+ character_set_description:
+ description:
+ - The description of the character set.
+ type: str
+ supported_nchar_character_sets:
+ description:
+ - A list of the character sets supported by the Oracle DB engine.
+ type: list
+ elements: dict
+ contains:
+ character_set_name:
+ description:
+ - The name of the character set.
+ type: str
+ character_set_description:
+ description:
+ - The description of the character set.
+ type: str
+ valid_upgrade_target:
+ description:
+ - A list of engine versions that this database engine version can be upgraded to.
+ type: list
+ elements: dict
+ sample: [
+ {
+ "auto_upgrade": false,
+ "description": "Aurora PostgreSQL (Compatible with PostgreSQL 15.5)",
+ "engine": "aurora-postgresql",
+ "engine_version": "15.5",
+ "is_major_version_upgrade": false,
+ "supported_engine_modes": [
+ "provisioned"
+ ],
+ "supports_babelfish": true,
+ "supports_global_databases": true,
+ "supports_integrations": false,
+ "supports_local_write_forwarding": true,
+ "supports_parallel_query": false
+ }
+ ]
+ supported_timezones:
+ description:
+ - A list of the time zones supported by this engine for the Timezone parameter of the CreateDBInstance action.
+ type: list
+ elements: dict
+ sample: [
+ {"TimezoneName": "xxx"}
+ ]
+ exportable_log_types:
+ description:
+ - The types of logs that the database engine has available for export to CloudWatch Logs.
+ type: list
+ elements: str
+ supports_log_exports_to_cloudwatch_logs:
+ description:
+ - Indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
+ type: bool
+ supports_read_replica:
+ description:
+ - Indicates whether the database engine version supports read replicas.
+ type: bool
+ supported_engine_modes:
+ description:
+ - A list of the supported DB engine modes.
+ type: list
+ elements: str
+ supported_feature_names:
+ description:
+ - A list of features supported by the DB engine.
+ type: list
+ elements: str
+ sample: [
+ "Comprehend",
+ "Lambda",
+ "s3Export",
+ "s3Import",
+ "SageMaker"
+ ]
+ status:
+ description:
+ - The status of the DB engine version, either available or deprecated.
+ type: str
+ supports_parallel_query:
+ description:
+ - Indicates whether you can use Aurora parallel query with a specific DB engine version.
+ type: bool
+ supports_global_databases:
+ description:
+ - Indicates whether you can use Aurora global databases with a specific DB engine version.
+ type: bool
+ major_engine_version:
+ description:
+ - The major engine version of the CEV.
+ type: str
+ database_installation_files_s3_bucket_name:
+ description:
+ - The name of the Amazon S3 bucket that contains your database installation files.
+ type: str
+ database_installation_files_s3_prefix:
+ description:
+ - The Amazon S3 directory that contains the database installation files.
+ type: str
+ db_engine_version_arn:
+ description:
+ - The ARN of the custom engine version.
+ type: str
+ kms_key_id:
+ description:
+ - The Amazon Web Services KMS key identifier for an encrypted CEV.
+ type: str
+ create_time:
+ description:
+ - The creation time of the DB engine version.
+ type: str
+ tags:
+ description: A dictionary of key value pairs.
+ type: dict
+ sample: {
+ "some": "tag"
+ }
+ supports_babelfish:
+ description:
+ - Indicates whether the engine version supports Babelfish for Aurora PostgreSQL.
+ type: bool
+ custom_db_engine_version_manifest:
+ description:
+ - JSON string that lists the installation files and parameters that RDS Custom uses to create a custom engine version (CEV).
+ type: str
+ supports_certificate_rotation_without_restart:
+ description:
+ - Indicates whether the engine version supports rotating the server certificate without rebooting the DB instance.
+ type: bool
+ supported_ca_certificate_identifiers:
+ description:
+ - A list of the supported CA certificate identifiers.
+ type: list
+ elements: str
+ sample: [
+ "rds-ca-2019",
+ "rds-ca-ecc384-g1",
+ "rds-ca-rsa4096-g1",
+ "rds-ca-rsa2048-g1"
+ ]
+ supports_local_write_forwarding:
+ description:
+ - Indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster.
+ type: bool
+ supports_integrations:
+ description:
+ - Indicates whether the DB engine version supports zero-ETL integrations with Amazon Redshift.
+ type: bool
+"""
+
+from typing import Any
+from typing import Dict
+from typing import List
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_db_engine_versions(connection: Any, **params: Dict[str, Any]) -> List[Dict[str, Any]]:
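+ # build_full_result() follows every pagination token and merges the
+ # per-page "DBEngineVersions" lists into a single list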
+ paginator = connection.get_paginator("describe_db_engine_versions")
+ return paginator.paginate(**params).build_full_result()["DBEngineVersions"]
+
+
+def describe_db_engine_versions(connection: Any, module: AnsibleAWSModule) -> Dict[str, Any]:
+ engine = module.params.get("engine")
+ engine_version = module.params.get("engine_version")
+ db_parameter_group_family = module.params.get("db_parameter_group_family")
+ default_only = module.params.get("default_only")
+ filters = module.params.get("filters")
+
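+ # the describe_db_engine_versions API rejects None values, so only
+ # include the optional keys that were actually supplied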
+ params = {"DefaultOnly": default_only}
+ if engine:
+ params["Engine"] = engine
+ if engine_version:
+ params["EngineVersion"] = engine_version
+ if db_parameter_group_family:
+ params["DBParameterGroupFamily"] = db_parameter_group_family
+ if filters:
+ params["Filters"] = filters
+
+ try:
+ result = _describe_db_engine_versions(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get RDS engine versions.")
+
+ def _transform_item(v):
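+ # pop TagList before snake_casing so user-defined tag keys keep their case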
+ tag_list = v.pop("TagList", [])
+ v = camel_dict_to_snake_dict(v)
+ v["tags"] = boto3_tag_list_to_ansible_dict(tag_list)
+ return v
+
+ return dict(changed=False, db_engine_versions=[_transform_item(v) for v in result])
+
+
+def main() -> None:
+ argument_spec = dict(
+ engine=dict(
+ choices=[
+ "aurora-mysql",
+ "aurora-postgresql",
+ "custom-oracle-ee",
+ "db2-ae",
+ "db2-se",
+ "mariadb",
+ "mysql",
+ "oracle-ee",
+ "oracle-ee-cdb",
+ "oracle-se2",
+ "oracle-se2-cdb",
+ "postgres",
+ "sqlserver-ee",
+ "sqlserver-se",
+ "sqlserver-ex",
+ "sqlserver-web",
+ ]
+ ),
+ engine_version=dict(),
+ db_parameter_group_family=dict(),
+ default_only=dict(type="bool", default=False),
+ filters=dict(type="dict"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ module.exit_json(**describe_db_engine_versions(client, module))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
index 4451d7638..0362df0ba 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
@@ -43,7 +43,9 @@ options:
type: bool
default: false
purge_cloudwatch_logs_exports:
- description: Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+ description:
+ - Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+ - Set I(enable_cloudwatch_logs_exports) to an empty list to disable all log exports.
type: bool
default: true
read_replica:
@@ -1028,7 +1030,7 @@ def get_options_with_changing_values(client, module, parameters):
parameters["DBPortNumber"] = port
if not force_update_password:
parameters.pop("MasterUserPassword", None)
- if cloudwatch_logs_enabled:
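+ # a falsy configuration (e.g. one that disables every export) is still
+ # a valid request, so only skip the key when the value is None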
+ if cloudwatch_logs_enabled is not None:
parameters["CloudwatchLogsExportConfiguration"] = cloudwatch_logs_enabled
if not module.params["storage_type"]:
parameters.pop("Iops", None)
@@ -1162,8 +1164,7 @@ def get_current_attributes_with_inconsistent_keys(instance):
def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups):
changing_params = {}
current_options = get_current_attributes_with_inconsistent_keys(instance)
- for option in current_options:
- current_option = current_options[option]
+ for option, current_option in current_options.items():
desired_option = modify_params.pop(option, None)
if desired_option is None:
continue
@@ -1565,8 +1566,7 @@ def main():
instance = get_instance(client, module, instance_id)
if instance:
break
- else:
- sleep(5)
+ sleep(5)
if state == "absent" and changed and not module.params["skip_final_snapshot"]:
instance.update(
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py
index abdb57c9b..82d0112fd 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py
@@ -6,7 +6,7 @@
DOCUMENTATION = r"""
---
-module: rds_param_group
+module: rds_instance_param_group
version_added: 5.0.0
short_description: manage RDS parameter groups
description:
@@ -31,8 +31,7 @@ options:
engine:
description:
- The type of database for this group.
- - Please use following command to get list of all supported db engines and their respective versions.
- - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+ - Please use M(amazon.aws.rds_engine_versions_info) to get a list of all supported DB engines and their respective versions.
- The DB parameter group family is immutable and can't be changed when updating a DB parameter group.
See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html)
- Required for I(state=present).
@@ -61,7 +60,7 @@ extends_documentation_fragment:
EXAMPLES = r"""
- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
state: present
name: norwegian-blue
description: 'My Fancy Ex Parrot Group'
@@ -73,7 +72,7 @@ EXAMPLES = r"""
Application: parrot
- name: Remove a parameter group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
state: absent
name: norwegian-blue
"""
@@ -149,9 +148,9 @@ def convert_parameter(param, value):
if param["DataType"] == "integer":
if isinstance(value, string_types):
try:
- for modifier in INT_MODIFIERS.keys():
- if value.endswith(modifier):
- converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
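+ # each INT_MODIFIERS entry maps a trailing size suffix to its
+ # multiplier (for example "5K" becomes 5 * the "K" multiplier)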
+ for name, modifier in INT_MODIFIERS.items():
+ if value.endswith(name):
+ converted_value = int(value[:-1]) * modifier
except ValueError:
# may be based on a variable (ie. {foo*3/4}) so
# just pass it on through to the AWS SDK
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
index 369c7c774..b2924145d 100644
--- a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
@@ -535,21 +535,21 @@ def update_health_check(existing_check):
return True, "update", check_id
-def describe_health_check(id):
- if not id:
+def describe_health_check(check_id):
+ if not check_id:
return dict()
try:
result = client.get_health_check(
aws_retry=True,
- HealthCheckId=id,
+ HealthCheckId=check_id,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get health check.", id=id)
+ module.fail_json_aws(e, msg="Failed to get health check.", id=check_id)
health_check = result.get("HealthCheck", {})
health_check = camel_dict_to_snake_dict(health_check)
- tags = get_tags(module, client, "healthcheck", id)
+ tags = get_tags(module, client, "healthcheck", check_id)
health_check["tags"] = tags
return health_check
@@ -705,7 +705,7 @@ def main():
if check_id:
changed |= manage_tags(module, client, "healthcheck", check_id, tags, purge_tags)
- health_check = describe_health_check(id=check_id)
+ health_check = describe_health_check(check_id)
health_check["action"] = action
module.exit_json(
changed=changed,
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
index d68223ede..d259286f9 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
@@ -352,6 +352,9 @@ acl:
import json
import time
+from typing import Iterator
+from typing import List
+from typing import Tuple
try:
import botocore
@@ -372,48 +375,22 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-def create_or_update_bucket(s3_client, module):
- policy = module.params.get("policy")
- name = module.params.get("name")
- requester_pays = module.params.get("requester_pays")
- tags = module.params.get("tags")
- purge_tags = module.params.get("purge_tags")
+def handle_bucket_versioning(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage versioning for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle versioning for.
+ Returns:
+ A tuple containing a boolean indicating whether versioning
+ was changed and a dictionary containing the updated versioning status.
+ """
versioning = module.params.get("versioning")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
- bucket_key_enabled = module.params.get("bucket_key_enabled")
- public_access = module.params.get("public_access")
- delete_public_access = module.params.get("delete_public_access")
- delete_object_ownership = module.params.get("delete_object_ownership")
- object_ownership = module.params.get("object_ownership")
- object_lock_enabled = module.params.get("object_lock_enabled")
- acl = module.params.get("acl")
- # default to US Standard region,
- # note: module.region will also try to pull a default out of the boto3 configs.
- location = module.region or "us-east-1"
-
- changed = False
- result = {}
+ versioning_changed = False
+ versioning_status = {}
try:
- bucket_is_present = bucket_exists(s3_client, name)
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- try:
- bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled)
- s3_client.get_waiter("bucket_exists").wait(Bucket=name)
- changed = changed or bucket_changed
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed while creating bucket")
-
- # Versioning
- try:
versioning_status = get_bucket_versioning(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
if versioning is not None:
@@ -438,19 +415,34 @@ def create_or_update_bucket(s3_client, module):
if required_versioning:
try:
put_bucket_versioning(s3_client, name, required_versioning)
- changed = True
+ versioning_changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket versioning")
versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
- # This output format is there to ensure compatibility with previous versions of the module
- result["versioning"] = {
+ versioning_result = {
"Versioning": versioning_status.get("Status", "Disabled"),
"MfaDelete": versioning_status.get("MFADelete", "Disabled"),
}
+ # This output format is there to ensure compatibility with previous versions of the module
+ return versioning_changed, versioning_result
- # Requester pays
+
+def handle_bucket_requester_pays(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage requester pays setting for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle requester pays setting for.
+ Returns:
+ A tuple containing a boolean indicating whether the requester pays setting
+ was changed and the requester pays value that was requested.
+ """
+ requester_pays = module.params.get("requester_pays")
+ requester_pays_changed = False
+ requester_pays_status = {}
try:
requester_pays_status = get_bucket_request_payment(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -476,11 +468,27 @@ def create_or_update_bucket(s3_client, module):
# account, so we retry one more time
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
- changed = True
+ requester_pays_changed = True
- result["requester_pays"] = requester_pays
+ return requester_pays_changed, requester_pays
+
+
+def handle_bucket_public_access_config(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage public access configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle public access configuration for.
+ Returns:
+ A tuple containing a boolean indicating whether public access configuration
+ was changed and a dictionary containing the updated public access configuration.
+ """
+ public_access = module.params.get("public_access")
+ delete_public_access = module.params.get("delete_public_access")
+ public_access_changed = False
+ public_access_result = {}
- # Public access clock configuration
current_public_access = {}
try:
current_public_access = get_bucket_public_access(s3_client, name)
@@ -502,22 +510,38 @@ def create_or_update_bucket(s3_client, module):
camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
if current_public_access == camel_public_block:
- result["public_access_block"] = current_public_access
+ public_access_result = current_public_access
else:
put_bucket_public_access(s3_client, name, camel_public_block)
- changed = True
- result["public_access_block"] = camel_public_block
+ public_access_changed = True
+ public_access_result = camel_public_block
# -- Delete public access block
if delete_public_access:
if current_public_access == {}:
- result["public_access_block"] = current_public_access
+ public_access_result = current_public_access
else:
delete_bucket_public_access(s3_client, name)
- changed = True
- result["public_access_block"] = {}
+ public_access_changed = True
+ public_access_result = {}
- # Policy
+ # Return the result
+ return public_access_changed, public_access_result
+
+
+def handle_bucket_policy(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage bucket policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle the policy for.
+ Returns:
+ A tuple containing a boolean indicating whether the bucket policy
+ was changed and a dictionary containing the updated bucket policy.
+ """
+ policy = module.params.get("policy")
+ policy_changed = False
try:
current_policy = get_bucket_policy(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -543,7 +567,7 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy)
- changed = True
+ policy_changed = True
elif compare_policies(current_policy, policy):
try:
put_bucket_policy(s3_client, name, policy)
@@ -555,11 +579,26 @@ def create_or_update_bucket(s3_client, module):
# account, so we retry one more time
put_bucket_policy(s3_client, name, policy)
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
- changed = True
+ policy_changed = True
- result["policy"] = current_policy
+ return policy_changed, current_policy
+
+
+def handle_bucket_tags(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage tags for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle tags for.
+ Returns:
+ A tuple containing a boolean indicating whether tags were changed
+ and a dictionary containing the updated tags.
+ """
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ bucket_tags_changed = False
- # Tags
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -596,11 +635,27 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket tags")
current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
- changed = True
+ bucket_tags_changed = True
- result["tags"] = current_tags_dict
+ return bucket_tags_changed, current_tags_dict
+
+
+def handle_bucket_encryption(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage encryption settings for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle encryption for.
+ Returns:
+ A tuple containing a boolean indicating whether encryption settings
+ were changed and a dictionary containing the updated encryption settings.
+ """
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ bucket_key_enabled = module.params.get("bucket_key_enabled")
+ encryption_changed = False
- # Encryption
try:
current_encryption = get_bucket_encryption(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -626,7 +681,7 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket encryption")
current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
- changed = True
+ encryption_changed = True
else:
if (encryption != current_encryption_algorithm) or (
encryption == "aws:kms" and current_encryption_key != encryption_key_id
@@ -635,24 +690,37 @@ def create_or_update_bucket(s3_client, module):
if encryption == "aws:kms" and encryption_key_id is not None:
expected_encryption.update({"KMSMasterKeyID": encryption_key_id})
current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
- changed = True
+ encryption_changed = True
if bucket_key_enabled is not None:
current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None
if current_encryption_algorithm == "aws:kms":
if get_bucket_key(s3_client, name) != bucket_key_enabled:
- if bucket_key_enabled:
- expected_encryption = True
- else:
- expected_encryption = False
+ expected_encryption = bool(bucket_key_enabled)
current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption)
- changed = True
- result["encryption"] = current_encryption
+ encryption_changed = True
- # -- Bucket ownership
+ return encryption_changed, current_encryption
+
+
+def handle_bucket_ownership(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage ownership settings for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle ownership for.
+ Returns:
+ A tuple containing a boolean indicating whether ownership settings were changed
+ and a dictionary containing the updated ownership settings.
+ """
+ delete_object_ownership = module.params.get("delete_object_ownership")
+ object_ownership = module.params.get("object_ownership")
+ bucket_ownership_changed = False
+ bucket_ownership_result = {}
try:
bucket_ownership = get_bucket_ownership_cntrl(s3_client, name)
- result["object_ownership"] = bucket_ownership
+ bucket_ownership_result = bucket_ownership
except KeyError as e:
# Some non-AWS providers appear to return policy documents that aren't
# compatible with AWS, cleanly catch KeyError so users can continue to use
@@ -676,21 +744,36 @@ def create_or_update_bucket(s3_client, module):
# delete S3 buckect ownership
if bucket_ownership is not None:
delete_bucket_ownership(s3_client, name)
- changed = True
- result["object_ownership"] = None
+ bucket_ownership_changed = True
+ bucket_ownership_result = None
elif object_ownership is not None:
# update S3 bucket ownership
if bucket_ownership != object_ownership:
put_bucket_ownership(s3_client, name, object_ownership)
- changed = True
- result["object_ownership"] = object_ownership
+ bucket_ownership_changed = True
+ bucket_ownership_result = object_ownership
- # -- Bucket ACL
+ return bucket_ownership_changed, bucket_ownership_result
+
+
+def handle_bucket_acl(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage Access Control List (ACL) for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle ACL for.
+ Returns:
+ A tuple containing a boolean indicating whether ACL was changed and a dictionary containing the updated ACL.
+ """
+ acl = module.params.get("acl")
+ bucket_acl_changed = False
+ bucket_acl_result = {}
if acl:
try:
s3_client.put_bucket_acl(Bucket=name, ACL=acl)
- result["acl"] = acl
- changed = True
+ bucket_acl_result = acl
+ bucket_acl_changed = True
except KeyError as e:
# Some non-AWS providers appear to return policy documents that aren't
# compatible with AWS, cleanly catch KeyError so users can continue to use
@@ -706,17 +789,31 @@ def create_or_update_bucket(s3_client, module):
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to update bucket ACL")
- # -- Object Lock
+ return bucket_acl_changed, bucket_acl_result
+
+
+def handle_bucket_object_lock(s3_client, module: AnsibleAWSModule, name: str) -> dict:
+ """
+ Manage object lock configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle object lock for.
+ Returns:
+ The updated object lock configuration.
+ """
+ object_lock_enabled = module.params.get("object_lock_enabled")
+ object_lock_result = {}
try:
object_lock_status = get_bucket_object_lock_enabled(s3_client, name)
- result["object_lock_enabled"] = object_lock_status
+ object_lock_result = object_lock_status
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
if object_lock_enabled is not None:
module.fail_json(msg="Fetching bucket object lock state is not supported")
except is_boto3_error_code("ObjectLockConfigurationNotFoundError"): # pylint: disable=duplicate-except
if object_lock_enabled:
module.fail_json(msg="Enabling object lock for existing buckets is not supported")
- result["object_lock_enabled"] = False
+ object_lock_result = False
except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except
if object_lock_enabled is not None:
module.fail_json(msg="Permission denied fetching object lock state for bucket")
@@ -732,21 +829,128 @@ def create_or_update_bucket(s3_client, module):
if object_lock_enabled and not object_lock_status:
module.fail_json(msg="Enabling object lock for existing buckets is not supported")
+ return object_lock_result
+
+
+def create_or_update_bucket(s3_client, module: AnsibleAWSModule):
+ """
+ Create or update an S3 bucket along with its associated configurations.
+ This function creates a new S3 bucket if it does not already exist, and updates its configurations,
+ such as versioning, requester pays, public access block configuration, policy, tags, encryption, bucket ownership,
+ ACL, and object lock settings. It exits the module, reporting whether any changes were made and the updated configurations.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ Returns:
+ None
+ """
+ name = module.params.get("name")
+ object_lock_enabled = module.params.get("object_lock_enabled")
+ # default to US Standard region,
+ # note: module.region will also try to pull a default out of the boto3 configs.
+ location = module.region or "us-east-1"
+
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled)
+ s3_client.get_waiter("bucket_exists").wait(Bucket=name)
+ changed = changed or bucket_changed
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
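+ # each handle_bucket_*() helper below returns a (changed, value) pair; the
+ # values are collected into result and the change flags are OR-ed together at the end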
+ # Versioning
+ versioning_changed, versioning_result = handle_bucket_versioning(s3_client, module, name)
+ result["versioning"] = versioning_result
+
+ # Requester pays
+ requester_pays_changed, requester_pays_result = handle_bucket_requester_pays(s3_client, module, name)
+ result["requester_pays"] = requester_pays_result
+
+ # Public access block configuration
+ public_access_config_changed, public_access_config_result = handle_bucket_public_access_config(
+ s3_client, module, name
+ )
+ result["public_access_block"] = public_access_config_result
+
+ # Policy
+ policy_changed, current_policy = handle_bucket_policy(s3_client, module, name)
+ result["policy"] = current_policy
+
+ # Tags
+ tags_changed, current_tags_dict = handle_bucket_tags(s3_client, module, name)
+ result["tags"] = current_tags_dict
+
+ # Encryption
+ encryption_changed, current_encryption = handle_bucket_encryption(s3_client, module, name)
+ result["encryption"] = current_encryption
+
+ # -- Bucket ownership
+ bucket_ownership_changed, object_ownership_result = handle_bucket_ownership(s3_client, module, name)
+ result["object_ownership"] = object_ownership_result
+
+ # -- Bucket ACL
+ bucket_acl_changed, bucket_acl_result = handle_bucket_acl(s3_client, module, name)
+ result["acl"] = bucket_acl_result
+
+ # -- Object Lock
+ bucket_object_lock_result = handle_bucket_object_lock(s3_client, module, name)
+ result["object_lock_enabled"] = bucket_object_lock_result
+
# Module exit
+ changed = (
+ changed
+ or versioning_changed
+ or requester_pays_changed
+ or public_access_config_changed
+ or policy_changed
+ or tags_changed
+ or encryption_changed
+ or bucket_ownership_changed
+ or bucket_acl_changed
+ )
module.exit_json(changed=changed, name=name, **result)
-def bucket_exists(s3_client, bucket_name):
+def bucket_exists(s3_client, bucket_name: str) -> bool:
+ """
+ Checks if a given bucket exists in an AWS S3 account.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the bucket to check for existence.
+ Returns:
+ True if the bucket exists, False otherwise.
+ """
try:
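+ # only a 404 means the bucket is absent; any other error (e.g. a 403 for
+ # an existing but inaccessible bucket) propagates to the caller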
s3_client.head_bucket(Bucket=bucket_name)
- bucket_exists = True
+ return True
except is_boto3_error_code("404"):
- bucket_exists = False
- return bucket_exists
+ return False
@AWSRetry.exponential_backoff(max_delay=120)
-def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False):
+def create_bucket(s3_client, bucket_name: str, location: str, object_lock_enabled: bool = False) -> bool:
+ """
+ Create an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the bucket to create.
+ location (str): The AWS region where the bucket should be created. If None, it defaults to "us-east-1".
+ object_lock_enabled (bool): Whether to enable object lock for the bucket. Defaults to False.
+ Returns:
+ True if the bucket was successfully created, False otherwise.
+ """
try:
params = {"Bucket": bucket_name}
@@ -770,22 +974,56 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_tagging(s3_client, bucket_name, tags):
+def put_bucket_tagging(s3_client, bucket_name: str, tags: dict):
+ """
+ Set tags for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ tags (dict): A dictionary containing the tags to be set on the bucket.
+ Returns:
+ None
+ """
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_policy(s3_client, bucket_name, policy):
+def put_bucket_policy(s3_client, bucket_name: str, policy: dict):
+ """
+ Set the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ policy (dict): A dictionary containing the policy to be set on the bucket.
+ Returns:
+ None
+ """
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_policy(s3_client, bucket_name):
+def delete_bucket_policy(s3_client, bucket_name: str):
+ """
+ Delete the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_policy(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_policy(s3_client, bucket_name):
+def get_bucket_policy(s3_client, bucket_name: str) -> str:
+ """
+ Get the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Current bucket policy.
+ """
try:
current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get("Policy")
if not current_policy_string:
@@ -798,33 +1036,83 @@ def get_bucket_policy(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_request_payment(s3_client, bucket_name, payer):
+def put_bucket_request_payment(s3_client, bucket_name: str, payer: str):
+ """
+ Set the request payment configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ payer (str): The entity responsible for charges related to fulfilling the request.
+ Returns:
+ None
+ """
s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={"Payer": payer})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_request_payment(s3_client, bucket_name):
+def get_bucket_request_payment(s3_client, bucket_name: str) -> str:
+ """
+ Get the request payment configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Payer of the download and request fees.
+ """
return s3_client.get_bucket_request_payment(Bucket=bucket_name).get("Payer")
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_versioning(s3_client, bucket_name):
+def get_bucket_versioning(s3_client, bucket_name: str) -> dict:
+ """
+ Get the versioning configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The versioning state of the bucket.
+ """
return s3_client.get_bucket_versioning(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+def put_bucket_versioning(s3_client, bucket_name: str, required_versioning: str):
+ """
+ Set the versioning configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ required_versioning (str): The desired versioning state for the bucket ("Enabled", "Suspended").
+ Returns:
+ None
+ """
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": required_versioning})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_object_lock_enabled(s3_client, bucket_name):
+def get_bucket_object_lock_enabled(s3_client, bucket_name: str) -> bool:
+ """
+ Retrieve the object lock configuration status for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ True if object lock is enabled for the bucket, False otherwise.
+ """
object_lock_configuration = s3_client.get_object_lock_configuration(Bucket=bucket_name)
return object_lock_configuration["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
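One caveat: GetObjectLockConfiguration fails with the ObjectLockConfigurationNotFoundError error code on buckets created without object lock, so a defensive caller might wrap the helper. A sketch, assuming is_boto3_error_code is in scope as elsewhere in this module:

def object_lock_enabled_or_default(s3_client, bucket_name: str) -> bool:
    # Hypothetical wrapper: treat "no object lock configuration" as disabled.
    try:
        return get_bucket_object_lock_enabled(s3_client, bucket_name)
    except is_boto3_error_code("ObjectLockConfigurationNotFoundError"):
        return False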
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_encryption(s3_client, bucket_name):
+def get_bucket_encryption(s3_client, bucket_name: str) -> dict:
+ """
+ Retrieve the encryption configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Encryption configuration of the bucket.
+ """
try:
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
return (
@@ -839,7 +1127,15 @@ def get_bucket_encryption(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_key(s3_client, bucket_name):
+def get_bucket_key(s3_client, bucket_name: str) -> bool:
+ """
+ Retrieve the S3 Bucket Key status for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ True if the S3 Bucket Key is enabled for the bucket, False otherwise.
+ """
try:
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
return result.get("ServerSideEncryptionConfiguration", {}).get("Rules", [])[0].get("BucketKeyEnabled")
@@ -849,7 +1145,17 @@ def get_bucket_key(s3_client, bucket_name):
return None
-def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+def put_bucket_encryption_with_retry(module: AnsibleAWSModule, s3_client, name: str, expected_encryption: dict) -> dict:
+ """
+ Set the encryption configuration for an S3 bucket with retry logic.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ name (str): The name of the S3 bucket.
+ expected_encryption (dict): A dictionary containing the expected encryption configuration.
+ Returns:
+ Updated encryption configuration of the bucket.
+ """
max_retries = 3
for retries in range(1, max_retries + 1):
try:
@@ -877,14 +1183,33 @@ def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_encryption(s3_client, bucket_name, encryption):
+def put_bucket_encryption(s3_client, bucket_name: str, encryption: dict) -> None:
+ """
+ Set the encryption configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ encryption (dict): A dictionary containing the encryption configuration.
+ Returns:
+ None
+ """
server_side_encryption_configuration = {"Rules": [{"ApplyServerSideEncryptionByDefault": encryption}]}
s3_client.put_bucket_encryption(
Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration
)
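Because the helper wraps the value in ApplyServerSideEncryptionByDefault itself, callers pass only the inner rule. Two hypothetical invocations:

# SSE with S3-managed keys.
put_bucket_encryption(s3_client, "example-bucket", {"SSEAlgorithm": "AES256"})
# SSE-KMS with a customer-managed key (the key alias is a placeholder).
put_bucket_encryption(
    s3_client,
    "example-bucket",
    {"SSEAlgorithm": "aws:kms", "KMSMasterKeyID": "alias/example-key"},
)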
-def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
+def put_bucket_key_with_retry(module: AnsibleAWSModule, s3_client, name: str, expected_encryption: bool) -> dict:
+ """
+ Set the S3 Bucket Key status for an S3 bucket with retry logic.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ name (str): The name of the S3 bucket.
+ expected_encryption (bool): The expected status of server-side encryption using AWS KMS.
+ Returns:
+ The updated status of server-side encryption using AWS KMS for the bucket.
+ """
max_retries = 3
for retries in range(1, max_retries + 1):
try:
@@ -909,7 +1234,16 @@ def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_key(s3_client, bucket_name, encryption):
+def put_bucket_key(s3_client, bucket_name: str, encryption: bool) -> None:
+ """
+ Set the S3 Bucket Key status for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ encryption (bool): The status of server-side encryption using AWS KMS.
+ Returns:
+ None
+ """
# server_side_encryption_configuration ={'Rules': [{'BucketKeyEnabled': encryption}]}
encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name)
encryption_status["ServerSideEncryptionConfiguration"]["Rules"][0]["BucketKeyEnabled"] = encryption
@@ -919,17 +1253,41 @@ def put_bucket_key(s3_client, bucket_name, encryption):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_tagging(s3_client, bucket_name):
+def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
+ """
+ Delete the tagging configuration of an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_tagging(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_encryption(s3_client, bucket_name):
+def delete_bucket_encryption(s3_client, bucket_name: str) -> None:
+ """
+ Delete the encryption configuration of an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_encryption(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=["OperationAborted"])
-def delete_bucket(s3_client, bucket_name):
+def delete_bucket(s3_client, bucket_name: str) -> None:
+ """
+ Delete an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
try:
s3_client.delete_bucket(Bucket=bucket_name)
except is_boto3_error_code("NoSuchBucket"):
@@ -939,40 +1297,74 @@ def delete_bucket(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_public_access(s3_client, bucket_name, public_acces):
+def put_bucket_public_access(s3_client, bucket_name: str, public_acces: dict) -> None:
"""
Put new public access block to S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ public_acces (dict): The public access block configuration to apply.
+ Returns:
+ None
"""
s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_acces)
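The configuration dictionary carries the four standard public access block flags. A sketch that blocks all public access on a hypothetical bucket:

put_bucket_public_access(
    s3_client,
    "example-bucket",
    {
        "BlockPublicAcls": True,
        "IgnorePublicAcls": True,
        "BlockPublicPolicy": True,
        "RestrictPublicBuckets": True,
    },
)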
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_public_access(s3_client, bucket_name):
+def delete_bucket_public_access(s3_client, bucket_name: str) -> None:
"""
Delete public access block from S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
"""
s3_client.delete_public_access_block(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_ownership(s3_client, bucket_name):
+def delete_bucket_ownership(s3_client, bucket_name: str) -> None:
"""
Delete bucket ownership controls from S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
"""
s3_client.delete_bucket_ownership_controls(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_ownership(s3_client, bucket_name, target):
+def put_bucket_ownership(s3_client, bucket_name: str, target: str) -> None:
"""
Put bucket ownership controls for S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ target (str): The object ownership rule to apply.
+ Returns:
+ None
"""
s3_client.put_bucket_ownership_controls(
Bucket=bucket_name, OwnershipControls={"Rules": [{"ObjectOwnership": target}]}
)
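target must be one of the ObjectOwnership rules the API accepts. For example:

# Valid values: "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced"
# (the last also disables ACLs on the bucket entirely).
put_bucket_ownership(s3_client, "example-bucket", "BucketOwnerEnforced")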
-def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+def wait_policy_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_policy: dict, should_fail: bool = True
+) -> dict:
+ """
+ Wait for a bucket policy to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_policy (dict): The expected bucket policy.
+ should_fail (bool): Flag indicating whether to fail if the policy is not applied within the expected time. Default is True.
+ Returns:
+ The current policy applied to the bucket, or None if the policy failed to apply within the expected time.
+ """
for dummy in range(0, 12):
try:
current_policy = get_bucket_policy(s3_client, bucket_name)
@@ -993,7 +1385,20 @@ def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
return None
-def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+def wait_payer_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_payer: str, should_fail=True
+) -> str:
+ """
+ Wait for the requester pays setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_payer (str): The expected payer setting ("Requester" or "BucketOwner").
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ Returns:
+ The current status of the requester pays setting applied to the bucket.
+ """
for dummy in range(0, 12):
try:
requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
@@ -1013,7 +1418,21 @@ def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
return None
-def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+def wait_encryption_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_encryption: dict, should_fail=True, retries=12
+) -> dict:
+ """
+ Wait for the encryption setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_encryption (dict): The expected encryption setting.
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ retries (int): The number of retries to attempt. Default is 12.
+ Returns:
+ The current encryption setting applied to the bucket.
+ """
for dummy in range(0, retries):
try:
encryption = get_bucket_encryption(s3_client, bucket_name)
@@ -1034,7 +1453,21 @@ def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
return encryption
-def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+def wait_bucket_key_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_encryption: bool, should_fail=True, retries=12
+) -> bool:
+ """
+ Wait for the bucket key setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_encryption (bool): The expected bucket key setting.
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ retries (int): The number of retries to attempt. Default is 12.
+ Returns:
+ The current bucket key setting applied to the bucket.
+ """
for dummy in range(0, retries):
try:
encryption = get_bucket_key(s3_client, bucket_name)
@@ -1054,7 +1487,19 @@ def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
return encryption
-def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+def wait_versioning_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, required_versioning: str
+) -> dict:
+ """
+ Wait for the versioning setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ required_versioning (str): The required versioning state ("Enabled" or "Suspended").
+ Returns:
+ The current versioning status applied to the bucket.
+ """
for dummy in range(0, 24):
try:
versioning_status = get_bucket_versioning(s3_client, bucket_name)
@@ -1071,7 +1516,17 @@ def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
)
-def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+def wait_tags_are_applied(module: AnsibleAWSModule, s3_client, bucket_name: str, expected_tags_dict: dict) -> dict:
+ """
+ Wait for the tags to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_tags_dict (dict): The expected tags dictionary.
+ Returns:
+ The current tags dictionary applied to the bucket.
+ """
for dummy in range(0, 12):
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
@@ -1088,7 +1543,15 @@ def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
)
-def get_current_bucket_tags_dict(s3_client, bucket_name):
+def get_current_bucket_tags_dict(s3_client, bucket_name: str) -> dict:
+ """
+ Get the current tags applied to an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The current tags dictionary applied to the bucket.
+ """
try:
current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get("TagSet")
except is_boto3_error_code("NoSuchTagSet"):
@@ -1100,9 +1563,14 @@ def get_current_bucket_tags_dict(s3_client, bucket_name):
return boto3_tag_list_to_ansible_dict(current_tags)
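The conversion mirrors what boto3_tag_list_to_ansible_dict does: the TagSet list of Key/Value pairs is flattened into a plain dictionary. Roughly:

tag_set = [{"Key": "env", "Value": "prod"}, {"Key": "team", "Value": "data"}]
tags = {t["Key"]: t["Value"] for t in tag_set}
# tags == {"env": "prod", "team": "data"}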
-def get_bucket_public_access(s3_client, bucket_name):
+def get_bucket_public_access(s3_client, bucket_name: str) -> dict:
"""
- Get current bucket public access block
+ Get current public access block configuration for a bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The current public access block configuration for the bucket.
"""
try:
bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
@@ -1111,9 +1579,14 @@ def get_bucket_public_access(s3_client, bucket_name):
return {}
-def get_bucket_ownership_cntrl(s3_client, bucket_name):
+def get_bucket_ownership_cntrl(s3_client, bucket_name: str) -> str:
"""
- Get current bucket public access block
+ Get the current bucket ownership controls.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The object ownership rule, or None if no ownership controls are set.
"""
try:
bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name)
@@ -1122,13 +1595,31 @@ def get_bucket_ownership_cntrl(s3_client, bucket_name):
return None
-def paginated_list(s3_client, **pagination_params):
+def paginated_list(s3_client, **pagination_params) -> Iterator[List[str]]:
+ """
+ Paginate through the list of objects in an S3 bucket.
+ This function yields the keys of objects in the S3 bucket, paginating through the results.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ **pagination_params: Additional parameters to pass to the paginator.
+ Yields:
+ list: A list of keys of objects in the bucket for each page of results.
+ """
pg = s3_client.get_paginator("list_objects_v2")
for page in pg.paginate(**pagination_params):
yield [data["Key"] for data in page.get("Contents", [])]
-def paginated_versions_list(s3_client, **pagination_params):
+def paginated_versions_list(s3_client, **pagination_params) -> Iterator[List[Tuple[str, str]]]:
+ """
+ Paginate through the list of object versions in an S3 bucket.
+ This function yields the keys and version IDs of object versions in the S3 bucket, paginating through the results.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ **pagination_params: Additional parameters to pass to the paginator.
+ Yields:
+ list: A list of tuples containing keys and version IDs of object versions in the bucket for each page of results.
+ """
try:
pg = s3_client.get_paginator("list_object_versions")
for page in pg.paginate(**pagination_params):
@@ -1140,7 +1631,48 @@ def paginated_versions_list(s3_client, **pagination_params):
yield []
-def destroy_bucket(s3_client, module):
+def delete_objects(s3_client, module: AnsibleAWSModule, name: str) -> None:
+ """
+ Delete all objects (and object versions) from an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get("VersionId") or fk.get("VersionId") == "null":
+ fk.pop("VersionId")
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys})
+ if resp.get("Errors"):
+ objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]])
+ module.fail_json(
+ msg=(f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}"),
+ errors=resp["Errors"],
+ response=resp,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+
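The VersionId normalisation above matters because ListObjectVersions reports a literal "null" version for objects written while versioning was off; dropping the key makes the batch call use plain DeleteObject semantics for them. Each page from the paginator holds at most 1,000 entries, which keeps every batch within the DeleteObjects limit. The same normalisation on sample data:

keys = [
    {"Key": "a.txt", "VersionId": "3sL4kqtJlcpXro"},  # placeholder version id
    {"Key": "b.txt", "VersionId": "null"},
    {"Key": "c.txt", "VersionId": None},
]
for key in keys:
    if not key.get("VersionId") or key["VersionId"] == "null":
        key.pop("VersionId")
# keys == [{"Key": "a.txt", "VersionId": "3sL4kqtJlcpXro"}, {"Key": "b.txt"}, {"Key": "c.txt"}]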
+def destroy_bucket(s3_client, module: AnsibleAWSModule) -> None:
+ """
+ Delete an S3 bucket, emptying it first when the module's force option is set.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ Returns:
+ None
+ """
force = module.params.get("force")
name = module.params.get("name")
try:
@@ -1156,29 +1688,9 @@ def destroy_bucket(s3_client, module):
if force:
# if there are contents then we need to delete them (including versions) before we can delete the bucket
try:
- for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
- formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs]
- for fk in formatted_keys:
- # remove VersionId from cases where they are `None` so that
- # unversioned objects are deleted using `DeleteObject`
- # rather than `DeleteObjectVersion`, improving backwards
- # compatibility with older IAM policies.
- if not fk.get("VersionId") or fk.get("VersionId") == "null":
- fk.pop("VersionId")
-
- if formatted_keys:
- resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys})
- if resp.get("Errors"):
- objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]])
- module.fail_json(
- msg=(
- f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}"
- ),
- errors=resp["Errors"],
- response=resp,
- )
+ delete_objects(s3_client, module, name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket")
+ module.fail_json_aws(e, msg="Failed while deleting objects")
try:
delete_bucket(s3_client, name)
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
index 2cd897c89..0486d3b9f 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_object.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
@@ -473,7 +473,7 @@ def key_check(module, s3, bucket, obj, version=None, validate=True):
def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
- s3_etag = get_etag(s3, bucket, obj, version=version)
+ s3_etag = _head_object(s3, bucket, obj, version=version).get("ETag")
if local_file is not None:
local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
else:
@@ -481,27 +481,49 @@ def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
return s3_etag == local_etag
-def get_etag(s3, bucket, obj, version=None):
+def _head_object(s3, bucket, obj, version=None):
try:
if version:
key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
else:
key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj)
if not key_check:
- return None
- return key_check["ETag"]
+ return {}
+ key_check.pop("ResponseMetadata")
+ return key_check
except is_boto3_error_code("404"):
- return None
+ return {}
+
+
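For plain single-part uploads the ETag returned by HeadObject is the quoted MD5 of the object body (SSE-KMS and multipart objects are exceptions), which is what the ETag comparison above ultimately relies on. A sketch, with hypothetical bucket, key, and path:

import hashlib

obj_info = _head_object(s3, "example-bucket", "example.txt")
with open("/tmp/example.txt", "rb") as f:
    local_md5 = hashlib.md5(f.read()).hexdigest()
# S3 wraps the hex digest in double quotes.
matches = obj_info.get("ETag") == f'"{local_md5}"'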
+def _get_object_content(module, s3, bucket, obj, version=None):
+ try:
+ if version:
+ contents = s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read()
+ else:
+ contents = s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read()
+ return contents
+ except is_boto3_error_code(["404", "403"]) as e:
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission.
+ module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
+ except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except
+ raise Sigv4Required()
+ except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ boto3.exceptions.Boto3Error,
+ ) as e: # pylint: disable=duplicate-except
+ raise S3ObjectFailure(f"Could not find the key {obj}.", e)
def get_s3_last_modified_timestamp(s3, bucket, obj, version=None):
- if version:
- key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
- else:
- key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj)
- if not key_check:
- return None
- return key_check["LastModified"].timestamp()
+ last_modified = None
+ obj_info = _head_object(s3, bucket, obj, version)
+ if obj_info:
+ last_modified = obj_info["LastModified"].timestamp()
+ return last_modified
def is_local_object_latest(s3, bucket, obj, version=None, local_file=None):
@@ -550,22 +572,6 @@ def paginated_list(s3, **pagination_params):
yield data["Key"]
-def paginated_versioned_list_with_fallback(s3, **pagination_params):
- try:
- versioned_pg = s3.get_paginator("list_object_versions")
- for page in versioned_pg.paginate(**pagination_params):
- delete_markers = [
- {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("DeleteMarkers", [])
- ]
- current_objects = [
- {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("Versions", [])
- ]
- yield delete_markers + current_objects
- except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ["AccessDenied"]):
- for key in paginated_list(s3, **pagination_params):
- yield [{"Key": key}]
-
-
def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None):
pagination_params = {
"Bucket": bucket,
@@ -779,29 +785,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
- try:
- # Note: Something of a permissions related hack
- # get_object returns the HEAD information, plus a *stream* which can be read.
- # because the stream's dropped on the floor, we never pull the data and this is the
- # functional equivalent of calling get_head which still relying on the 'GET' permission
- if version:
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
- else:
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)
- except is_boto3_error_code(["404", "403"]) as e:
- # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
- # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
- module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
- except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except
- raise Sigv4Required()
- except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- boto3.exceptions.Boto3Error,
- ) as e: # pylint: disable=duplicate-except
- raise S3ObjectFailure(f"Could not find the key {obj}.", e)
+ _get_object_content(module, s3, bucket, obj, version)
optional_kwargs = {"ExtraArgs": {"VersionId": version}} if version else {}
for x in range(0, retries + 1):
@@ -827,27 +811,8 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
def download_s3str(module, s3, bucket, obj, version=None):
if module.check_mode:
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
- try:
- if version:
- contents = to_native(
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read()
- )
- else:
- contents = to_native(s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read())
- module.exit_json(msg="GET operation complete", contents=contents, changed=True)
- except is_boto3_error_message("require AWS Signature Version 4"):
- raise Sigv4Required()
- except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
- module.fail_json_aws(
- e,
- msg=f"Failed while getting contents of object {obj} as a string.",
- )
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- boto3.exceptions.Boto3Error,
- ) as e: # pylint: disable=duplicate-except
- raise S3ObjectFailure(f"Failed while getting contents of object {obj} as a string.", e)
+ contents = to_native(_get_object_content(module, s3, bucket, obj, version))
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True):
@@ -997,13 +962,13 @@ def ensure_tags(client, module, bucket, obj):
return current_tags_dict, changed
-def get_binary_content(vars):
+def get_binary_content(s3_vars):
# the content will be uploaded as a byte string, so we must encode it first
bincontent = None
- if vars.get("content"):
- bincontent = vars["content"].encode("utf-8")
- if vars.get("content_base64"):
- bincontent = base64.standard_b64decode(vars["content_base64"])
+ if s3_vars.get("content"):
+ bincontent = s3_vars["content"].encode("utf-8")
+ if s3_vars.get("content_base64"):
+ bincontent = base64.standard_b64decode(s3_vars["content_base64"])
return bincontent
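A quick sketch of the two input paths (in the module these options are alternatives, not combined):

import base64

assert get_binary_content({"content": "hello"}) == b"hello"
assert get_binary_content({"content_base64": base64.b64encode(b"hello").decode()}) == b"hello"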
@@ -1271,6 +1236,17 @@ def check_object_tags(module, connection, bucket, obj):
return diff
+def calculate_object_etag(module, s3, bucket, obj, head_etag, version=None):
+ etag = head_etag
+ if "-" in etag:
+ # object has been created using multipart upload, compute ETag using
+ # object content to ensure idempotency.
+ contents = _get_object_content(module, s3, bucket, obj, version)
+ # Pass ETag as None to force calculate_etag_content to compute it from the content
+ etag = calculate_etag_content(module, contents, None, s3, bucket, obj)
+ return etag
+
+
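The "-" check works because multipart uploads produce a composite ETag: the MD5 of the concatenated per-part MD5 digests, suffixed with the part count, rather than the MD5 of the whole body. A reference sketch of that construction (part boundaries are an assumption; real uploads use the configured chunk size):

import hashlib
from typing import List

def multipart_etag(parts: List[bytes]) -> str:
    # parts: one bytes object per uploaded part, in upload order.
    digests = b"".join(hashlib.md5(part).digest() for part in parts)
    return f'"{hashlib.md5(digests).hexdigest()}-{len(parts)}"'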
def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None):
try:
params = {"Bucket": bucket, "Key": obj}
@@ -1281,14 +1257,33 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None):
changed=False,
)
- s_etag = get_etag(s3, src_bucket, src_obj, version=versionId)
- d_etag = get_etag(s3, bucket, obj)
- if s_etag == d_etag:
+ s_obj_info = _head_object(s3, src_bucket, src_obj, version=versionId)
+ d_obj_info = _head_object(s3, bucket, obj)
+ do_match = True
+ diff_msg = None
+ if d_obj_info:
+ src_etag = calculate_object_etag(module, s3, src_bucket, src_obj, s_obj_info.get("ETag"), versionId)
+ dst_etag = calculate_object_etag(module, s3, bucket, obj, d_obj_info.get("ETag"))
+ if src_etag != dst_etag:
+ # Source and destination object ETags differ
+ do_match = False
+ diff_msg = "ETag from source and destination differ"
+ if do_match and metadata and metadata != d_obj_info.get("Metadata"):
+ # Metadata from the module inputs differs from the metadata retrieved from the object headers
+ diff_msg = "Would have updated object Metadata if not running in check mode."
+ do_match = False
+ else:
+ # The destination object does not exist
+ do_match = False
+ diff_msg = "Would have copied the object if not running in check mode."
+
+ if do_match:
+ # S3 objects are equal; only the tags may still need updating
if module.check_mode:
changed = check_object_tags(module, s3, bucket, obj)
result = {}
if changed:
- result.update({"msg": "Would have update object tags is not running in check mode."})
+ result.update({"msg": "Would have update object tags if not running in check mode."})
return changed, result
# Ensure tags
@@ -1297,8 +1292,9 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None):
if changed:
result = {"msg": "tags successfully updated.", "tags": tags}
return changed, result
- elif module.check_mode:
- return True, {"msg": "ETag from source and destination differ"}
+ # S3 objects differ
+ if module.check_mode:
+ return True, {"msg": diff_msg}
else:
changed = True
bucketsrc = {
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
index 65bd5e328..39f0c2798 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
@@ -741,8 +741,10 @@ def main():
result.append(object_details)
elif object_name is None:
object_list = list_bucket_objects(connection, module, bucket_name)
- for object in object_list:
- result.append(get_object_details(connection, module, bucket_name, object, requested_object_details))
+ for bucket_object in object_list:
+ result.append(
+ get_object_details(connection, module, bucket_name, bucket_object, requested_object_details)
+ )
elif not requested_object_details and object_name:
# if specific details are not requested, return object metadata