author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-18 05:52:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-18 05:52:35 +0000
commit     7fec0b69a082aaeec72fee0612766aa42f6b1b4d
tree       efb569b86ca4da888717f5433e757145fa322e08 /ansible_collections/amazon/aws/plugins
parent     Releasing progress-linux version 7.7.0+dfsg-3~progress7.99u1.
Merging upstream version 9.4.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/amazon/aws/plugins')
165 files changed, 26014 insertions, 15576 deletions
diff --git a/ansible_collections/amazon/aws/plugins/action/s3_object.py b/ansible_collections/amazon/aws/plugins/action/s3_object.py
index a78dd0bed..f78a42fa3 100644
--- a/ansible_collections/amazon/aws/plugins/action/s3_object.py
+++ b/ansible_collections/amazon/aws/plugins/action/s3_object.py
@@ -1,50 +1,38 @@
+# -*- coding: utf-8 -*-
+
 # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
 # (c) 2018, Will Thames <will@thames.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

 import os

-from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
+from ansible.errors import AnsibleAction
+from ansible.errors import AnsibleActionFail
+from ansible.errors import AnsibleError
+from ansible.errors import AnsibleFileNotFound
 from ansible.module_utils._text import to_text
 from ansible.plugins.action import ActionBase
 from ansible.utils.vars import merge_hash


 class ActionModule(ActionBase):
-
     TRANSFERS_FILES = True

     def run(self, tmp=None, task_vars=None):
-        ''' handler for s3_object operations
+        """handler for s3_object operations

         This adds the magic that means 'src' can point to both a 'remote' file
         on the 'host' or in the 'files/' lookup path on the controller.
-        '''
+        """
         self._supports_async = True

         if task_vars is None:
             task_vars = dict()

-        result = super(ActionModule, self).run(tmp, task_vars)
+        result = super().run(tmp, task_vars)
         del tmp  # tmp no longer has any effect

-        source = self._task.args.get('src', None)
+        source = self._task.args.get("src", None)

         try:
             new_module_args = self._task.args.copy()
@@ -54,17 +42,19 @@ class ActionModule(ActionBase):
             # For backward compatibility check if the file exists on the remote; it should take precedence
             if not self._remote_file_exists(source):
                 try:
-                    source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
-                    new_module_args['src'] = source
+                    source = self._loader.get_real_file(self._find_needle("files", source), decrypt=False)
+                    new_module_args["src"] = source
                 except AnsibleFileNotFound:
                     # module handles error message for nonexistent files
-                    new_module_args['src'] = source
+                    new_module_args["src"] = source
         except AnsibleError as e:
             raise AnsibleActionFail(to_text(e))

         wrap_async = self._task.async_val and not self._connection.has_native_async
         # execute the s3_object module with the updated args
-        result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
+        result = merge_hash(
+            result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async)
+        )

         if not wrap_async:
             # remove a temporary path we created
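For context, the 'src' handling above means a task can name a file that exists only on the controller's files/ lookup path. A minimal sketch of a task relying on it (bucket and file names are placeholders; parameter names follow the s3_object module docs):

    - name: Upload a config file to S3
      amazon.aws.s3_object:
        bucket: my-example-bucket   # hypothetical bucket
        object: /config/app.conf
        src: app.conf               # checked on the managed host first, then files/ on the controller
        mode: put
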
diff --git a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
index 551a866a3..fa3a155ff 100644
--- a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
+++ b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
@@ -1,11 +1,9 @@
+# -*- coding: utf-8 -*-
+
 # (C) 2018 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = """
 name: aws_resource_actions
 type: aggregate
 short_description: summarizes all "resource:actions" completed
@@ -15,43 +13,37 @@ DOCUMENTATION = '''
   be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
 requirements:
   - whitelisting in configuration - see examples section below for details.
-'''
+"""

-EXAMPLES = '''
+EXAMPLES = """
 example: >
   To enable, add this to your ansible.cfg file in the defaults block
     [defaults]
     callback_whitelist = aws_resource_actions
 sample output: >
-#
-# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
-#               's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
-#               's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
-#
-sample output: >
-#
-# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
-#               'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
-#
-'''
+  #
+  # AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
+  #               's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
+  #               's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
+"""

-from ansible.plugins.callback import CallbackBase
 from ansible.module_utils._text import to_native
+from ansible.plugins.callback import CallbackBase


 class CallbackModule(CallbackBase):
     CALLBACK_VERSION = 2.8
-    CALLBACK_TYPE = 'aggregate'
-    CALLBACK_NAME = 'amazon.aws.aws_resource_actions'
+    CALLBACK_TYPE = "aggregate"
+    CALLBACK_NAME = "amazon.aws.aws_resource_actions"
     CALLBACK_NEEDS_WHITELIST = True

     def __init__(self):
         self.aws_resource_actions = []
-        super(CallbackModule, self).__init__()
+        super().__init__()

     def extend_aws_resource_actions(self, result):
-        if result.get('resource_actions'):
-            self.aws_resource_actions.extend(result['resource_actions'])
+        if result.get("resource_actions"):
+            self.aws_resource_actions.extend(result["resource_actions"])

     def runner_on_ok(self, host, res):
         self.extend_aws_resource_actions(res)
@@ -68,4 +60,4 @@ class CallbackModule(CallbackBase):
     def playbook_on_stats(self, stats):
         if self.aws_resource_actions:
             self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
-            self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
+            self._display.display(f"AWS ACTIONS: {self.aws_resource_actions}")
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py b/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py
new file mode 100644
index 000000000..0aac10a89
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment:
+    # Note: If you're updating MODULES, PLUGINS probably needs updating too.
+
+    # Formatted for Modules
+    # - modules don't support 'env'
+    MODULES = r"""
+options: {}
+"""
+
+    # Formatted for non-module plugins
+    # - modules don't support 'env'
+    PLUGINS = r"""
+options:
+  assume_role_arn:
+    description:
+      - The ARN of the IAM role to assume to perform the lookup.
+      - You should still provide AWS credentials with enough privilege to perform the AssumeRole action.
+    aliases: ["iam_role_arn"]
+"""
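This new fragment backs the aws_ec2 inventory change further down in this diff, which drops the plugin-local iam_role_arn option in favour of the shared one. A minimal inventory sketch using it (the role ARN is a placeholder):

    plugin: amazon.aws.aws_ec2
    regions:
      - us-east-1
    # assume_role_arn comes from the shared assume_role fragment; iam_role_arn remains as an alias
    assume_role_arn: arn:aws:iam::123456789012:role/InventoryReadOnly
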
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
index eeff899c6..13a72a910 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
@@ -1,143 +1,16 @@
 # -*- coding: utf-8 -*-

-# Copyright: (c) 2014, Will Thames <will@thames.id.au>
+# (c) 2022 Red Hat Inc.
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from .common import ModuleDocFragment as CommonFragment

+#
+# The amazon.aws.aws docs fragment has been deprecated,
+# please migrate to amazon.aws.common.modules.
+#

-class ModuleDocFragment(object):
-    # AWS only documentation fragment
-    DOCUMENTATION = r'''
-options:
-  access_key:
-    description:
-      - AWS access key ID.
-      - See the AWS documentation for more information about access tokens
-        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
-      - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY)
-        environment variables may also be used in decreasing order of
-        preference. Prior to release 6.0.0 these environment variables will be
-        ignored if the I(profile) parameter is passed. After release 6.0.0
-        I(access_key) will always fall back to the environment variables if set.
-      - The I(aws_access_key) and I(profile) options are mutually exclusive.
-      - The I(aws_access_key_id) alias was added in release 5.1.0 for
-        consistency with the AWS botocore SDK.
-      - The I(ec2_access_key) alias has been deprecated and will be removed in a
-        release after 2024-12-01.
-      - Support for the C(EC2_ACCESS_KEY) environment variable has been
-        deprecated and will be removed in a release after 2024-12-01.
-    type: str
-    aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
-  secret_key:
-    description:
-      - AWS secret access key.
-      - See the AWS documentation for more information about access tokens
-        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
-      - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY)
-        environment variables may also be used in decreasing order of
-        preference. Prior to release 6.0.0 these environment variables will be
-        ignored if the I(profile) parameter is passed. After release 6.0.0
-        I(secret_key) will always fall back to the environment variables if set.
-      - The I(secret_key) and I(profile) options are mutually exclusive.
-      - The I(aws_secret_access_key) alias was added in release 5.1.0 for
-        consistency with the AWS botocore SDK.
-      - The I(ec2_secret_key) alias has been deprecated and will be removed in a
-        release after 2024-12-01.
-      - Support for the C(EC2_SECRET_KEY) environment variable has been
-        deprecated and will be removed in a release after 2024-12-01.
-    type: str
-    aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
-  session_token:
-    description:
-      - AWS STS session token for use with temporary credentials.
-      - See the AWS documentation for more information about access tokens
-        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
-      - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN)
-        environment variables may also be used in decreasing order of preference.
-        Prior to release 6.0.0 these environment variables will be
-        ignored if the I(profile) parameter is passed. After release 6.0.0
-        I(session_token) will always fall back to the environment variables if set.
-      - The I(security_token) and I(profile) options are mutually exclusive.
-      - Aliases I(aws_session_token) and I(session_token) were added in release
-        3.2.0, with the parameter being renamed from I(security_token) to
-        I(session_token) in release 6.0.0.
-      - The I(security_token), I(aws_security_token), and I(access_token)
-        aliases have been deprecated and will be removed in a release after
-        2024-12-01.
-      - Support for the C(EC2_SECRET_KEY) and C(AWS_SECURITY_TOKEN) environment
-        variables has been deprecated and will be removed in a release after
-        2024-12-01.
-    type: str
-    aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
-  profile:
-    description:
-      - A named AWS profile to use for authentication.
-      - See the AWS documentation for more information about named profiles
-        U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
-      - The C(AWS_PROFILE) environment variable may also be used. Prior to release 6.0.0 the
-        C(AWS_PROFILE) environment variable will be ignored if any of I(access_key), I(secret_key),
-        or I(session_token) are passed. After release 6.0.0 I(profile) will always fall back to the
-        C(AWS_PROFILE) environment variable if set.
-      - The I(profile) option is mutually exclusive with the I(aws_access_key),
-        I(aws_secret_key) and I(security_token) options.
-    type: str
-    aliases: ['aws_profile']
-
-  endpoint_url:
-    description:
-      - URL to connect to instead of the default AWS endpoints. While this
-        can be used to connection to other AWS-compatible services the
-        amazon.aws and community.aws collections are only tested against
-        AWS.
-      - The C(AWS_URL) or C(EC2_URL) environment variables may also be used,
-        in decreasing order of preference.
-      - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be
-        removed in a release after 2024-12-01.
-      - Support for the C(EC2_URL) environment variable has been deprecated and
-        will be removed in a release after 2024-12-01.
-    type: str
-    aliases: ['ec2_url', 'aws_endpoint_url', 's3_url' ]
-  aws_ca_bundle:
-    description:
-      - The location of a CA Bundle to use when validating SSL certificates.
-      - The C(AWS_CA_BUNDLE) environment variable may also be used.
-    type: path
-  validate_certs:
-    description:
-      - When set to C(false), SSL certificates will not be validated for
-        communication with the AWS APIs.
-      - Setting I(validate_certs=false) is strongly discouraged, as an
-        alternative, consider setting I(aws_ca_bundle) instead.
-    type: bool
-    default: true
-  aws_config:
-    description:
-      - A dictionary to modify the botocore configuration.
-      - Parameters can be found in the AWS documentation
-        U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
-    type: dict
-  debug_botocore_endpoint_logs:
-    description:
-      - Use a C(botocore.endpoint) logger to parse the unique (rather than total)
-        C("resource:action") API calls made during a task, outputing the set to
-        the resource_actions key in the task results. Use the
-        C(aws_resource_action) callback to output to total list made during
-        a playbook.
-      - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used.
-    type: bool
-    default: false
-notes:
-  - B(Caution:) For modules, environment variables and configuration files are
-    read from the Ansible 'host' context and not the 'controller' context.
-    As such, files may need to be explicitly copied to the 'host'. For lookup
-    and connection plugins, environment variables and configuration files are
-    read from the Ansible 'controller' context and not the 'host' context.
-  - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
-    and other settings, such as the region, from its configuration files in the
-    Ansible 'host' context (typically C(~/.aws/credentials)).
-    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
-    for more information.
-'''
+class ModuleDocFragment:
+    def __init__(self):
+        self.DOCUMENTATION = CommonFragment.MODULES
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
index 73eff046e..96295a1f5 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
@@ -3,14 +3,15 @@
 # Copyright: (c) 2017, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+#
+# The amazon.aws.aws_credentials docs fragment has been deprecated,
+# please migrate to amazon.aws.common.plugins.
+#


-class ModuleDocFragment(object):
-
+class ModuleDocFragment:
     # Plugin options for AWS credentials
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
   aws_profile:
     description: The AWS profile
@@ -25,6 +26,11 @@ options:
     aliases: [ aws_access_key_id ]
     env:
       - name: EC2_ACCESS_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_ACCESS_KEY_ID
       - name: AWS_ACCESS_KEY
       - name: AWS_ACCESS_KEY_ID
   aws_secret_key:
@@ -33,6 +39,11 @@ options:
     aliases: [ aws_secret_access_key ]
     env:
       - name: EC2_SECRET_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SECRET_ACCESS_KEY
       - name: AWS_SECRET_KEY
       - name: AWS_SECRET_ACCESS_KEY
   aws_security_token:
@@ -40,6 +51,16 @@ options:
     type: str
     env:
       - name: EC2_SECURITY_TOKEN
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SESSION_TOKEN
       - name: AWS_SESSION_TOKEN
       - name: AWS_SECURITY_TOKEN
-'''
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'AWS_SECURITY_TOKEN was used for compatibility with the original boto SDK, support for which has been dropped'
+          alternatives: AWS_SESSION_TOKEN
+"""
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
index 521526601..e247f8090 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
@@ -1,21 +1,16 @@
 # -*- coding: utf-8 -*-

-# Copyright: (c) 2017, Ansible Project
+# (c) 2022 Red Hat Inc.
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from .region import ModuleDocFragment as RegionFragment

+#
+# The amazon.aws.aws_region docs fragment has been deprecated,
+# please migrate to amazon.aws.region.plugins.
+#

-class ModuleDocFragment(object):
-    # Plugin option for AWS region
-    DOCUMENTATION = r'''
-options:
-  region:
-    description: The region for which to create the connection.
-    type: str
-    env:
-      - name: EC2_REGION
-      - name: AWS_REGION
-'''
+class ModuleDocFragment:
+    def __init__(self):
+        self.DOCUMENTATION = RegionFragment.PLUGINS
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
index a88e2e018..77bf98687 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
@@ -1,19 +1,23 @@
 # -*- coding: utf-8 -*-

-# Copyright: (c) 2022, Ansible Project
+# Copyright: (c) 2022, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from ansible_collections.amazon.aws.plugins.module_utils import botocore as botocore_utils


-class ModuleDocFragment(object):
-
-    # Minimum requirements for the collection
-    DOCUMENTATION = r'''
-options: {}
+class ModuleDocFragment:
+    # Modules and Plugins can (currently) use the same fragment
+    def __init__(self):
+        # Minimum requirements for the collection
+        requirements = f"""
+options: {{}}
 requirements:
   - python >= 3.6
-  - boto3 >= 1.18.0
-  - botocore >= 1.21.0
-'''
+  - boto3 >= {botocore_utils.MINIMUM_BOTO3_VERSION}
+  - botocore >= {botocore_utils.MINIMUM_BOTOCORE_VERSION}
+"""
+
+        self.DOCUMENTATION = requirements
+        self.MODULES = requirements
+        self.PLUGINS = requirements
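The three shims above keep the old fragment names importable while pointing at the new split fragments, and boto3.py now derives its SDK floor from the module_utils constants instead of hardcoding versions. For plugin docs still referencing the deprecated names, migration is a one-line change per the comments above; a sketch of the new-style block, as the aws_ec2 inventory plugin uses it later in this diff:

    extends_documentation_fragment:
      - amazon.aws.boto3
      - amazon.aws.common.plugins
      - amazon.aws.region.plugins
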
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/common.py b/ansible_collections/amazon/aws/plugins/doc_fragments/common.py
new file mode 100644
index 000000000..3080b1629
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/common.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Will Thames <will@thames.id.au>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment:
+    # Common configuration for all AWS services
+    # Note: If you're updating MODULES, PLUGINS probably needs updating too.
+
+    # Formatted for Modules
+    # - modules don't support 'env'
+    MODULES = r"""
+options:
+  access_key:
+    description:
+      - AWS access key ID.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY)
+        environment variables may also be used in decreasing order of
+        preference.
+      - The I(aws_access_key) and I(profile) options are mutually exclusive.
+      - The I(aws_access_key_id) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_access_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+      - Support for the C(EC2_ACCESS_KEY) environment variable has been
+        deprecated and will be removed in a release after 2024-12-01.
+    type: str
+    aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
+  secret_key:
+    description:
+      - AWS secret access key.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY)
+        environment variables may also be used in decreasing order of
+        preference.
+      - The I(secret_key) and I(profile) options are mutually exclusive.
+      - The I(aws_secret_access_key) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_secret_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+      - Support for the C(EC2_SECRET_KEY) environment variable has been
+        deprecated and will be removed in a release after 2024-12-01.
+    type: str
+    aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
+  session_token:
+    description:
+      - AWS STS session token for use with temporary credentials.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN)
+        environment variables may also be used in decreasing order of preference.
+      - The I(security_token) and I(profile) options are mutually exclusive.
+      - Aliases I(aws_session_token) and I(session_token) were added in release
+        3.2.0, with the parameter being renamed from I(security_token) to
+        I(session_token) in release 6.0.0.
+      - The I(security_token), I(aws_security_token), and I(access_token)
+        aliases have been deprecated and will be removed in a release after
+        2024-12-01.
+      - Support for the C(EC2_SECRET_KEY) and C(AWS_SECURITY_TOKEN) environment
+        variables has been deprecated and will be removed in a release after
+        2024-12-01.
+    type: str
+    aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
+  profile:
+    description:
+      - A named AWS profile to use for authentication.
+      - See the AWS documentation for more information about named profiles
+        U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
+      - The C(AWS_PROFILE) environment variable may also be used.
+      - The I(profile) option is mutually exclusive with the I(aws_access_key),
+        I(aws_secret_key) and I(security_token) options.
+    type: str
+    aliases: ['aws_profile']
+
+  endpoint_url:
+    description:
+      - URL to connect to instead of the default AWS endpoints. While this
+        can be used to connection to other AWS-compatible services the
+        amazon.aws and community.aws collections are only tested against
+        AWS.
+      - The C(AWS_URL) or C(EC2_URL) environment variables may also be used,
+        in decreasing order of preference.
+      - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be
+        removed in a release after 2024-12-01.
+      - Support for the C(EC2_URL) environment variable has been deprecated and
+        will be removed in a release after 2024-12-01.
+    type: str
+    aliases: ['ec2_url', 'aws_endpoint_url', 's3_url' ]
+
+  aws_ca_bundle:
+    description:
+      - The location of a CA Bundle to use when validating SSL certificates.
+      - The C(AWS_CA_BUNDLE) environment variable may also be used.
+    type: path
+  validate_certs:
+    description:
+      - When set to C(false), SSL certificates will not be validated for
+        communication with the AWS APIs.
+      - Setting I(validate_certs=false) is strongly discouraged, as an
+        alternative, consider setting I(aws_ca_bundle) instead.
+    type: bool
+    default: true
+  aws_config:
+    description:
+      - A dictionary to modify the botocore configuration.
+      - Parameters can be found in the AWS documentation
+        U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
+    type: dict
+  debug_botocore_endpoint_logs:
+    description:
+      - Use a C(botocore.endpoint) logger to parse the unique (rather than total)
+        C("resource:action") API calls made during a task, outputing the set to
+        the resource_actions key in the task results. Use the
+        C(aws_resource_action) callback to output to total list made during
+        a playbook.
+      - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used.
+    type: bool
+    default: false
+notes:
+  - B(Caution:) For modules, environment variables and configuration files are
+    read from the Ansible 'host' context and not the 'controller' context.
+    As such, files may need to be explicitly copied to the 'host'. For lookup
+    and connection plugins, environment variables and configuration files are
+    read from the Ansible 'controller' context and not the 'host' context.
+  - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
+    and other settings, such as the region, from its configuration files in the
+    Ansible 'host' context (typically C(~/.aws/credentials)).
+    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
+    for more information.
+"""
+
+    # Formatted for non-module plugins
+    # - modules don't support 'env'
+    PLUGINS = r"""
+options:
+  access_key:
+    description:
+      - AWS access key ID.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(aws_access_key) and I(profile) options are mutually exclusive.
+      - The I(aws_access_key_id) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_access_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
+    env:
+      - name: AWS_ACCESS_KEY_ID
+      - name: AWS_ACCESS_KEY
+      - name: EC2_ACCESS_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_ACCESS_KEY_ID
+  secret_key:
+    description:
+      - AWS secret access key.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(secret_key) and I(profile) options are mutually exclusive.
+      - The I(aws_secret_access_key) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_secret_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
+    env:
+      - name: AWS_SECRET_ACCESS_KEY
+      - name: AWS_SECRET_KEY
+      - name: EC2_SECRET_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SECRET_ACCESS_KEY
+  session_token:
+    description:
+      - AWS STS session token for use with temporary credentials.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(security_token) and I(profile) options are mutually exclusive.
+      - Aliases I(aws_session_token) and I(session_token) were added in release
+        3.2.0, with the parameter being renamed from I(security_token) to
+        I(session_token) in release 6.0.0.
+      - The I(security_token), I(aws_security_token), and I(access_token)
+        aliases have been deprecated and will be removed in a release after
+        2024-12-01.
+    type: str
+    aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
+    env:
+      - name: AWS_SESSION_TOKEN
+      - name: AWS_SECURITY_TOKEN
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'AWS_SECURITY_TOKEN was used for compatibility with the original boto SDK, support for which has been dropped'
+          alternatives: AWS_SESSION_TOKEN
+      - name: EC2_SECURITY_TOKEN
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SESSION_TOKEN
+
+  profile:
+    description:
+      - A named AWS profile to use for authentication.
+      - See the AWS documentation for more information about named profiles
+        U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
+      - The I(profile) option is mutually exclusive with the I(aws_access_key),
+        I(aws_secret_key) and I(security_token) options.
+      - The I(boto_profile) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_profile', 'boto_profile']
+    env:
+      - name: AWS_PROFILE
+      - name: AWS_DEFAULT_PROFILE
+  endpoint_url:
+    description:
+      - URL to connect to instead of the default AWS endpoints. While this
+        can be used to connection to other AWS-compatible services the
+        amazon.aws and community.aws collections are only tested against
+        AWS.
+      - The I(endpoint) alias has been deprecated and will be
+        removed in a release after 2024-12-01.
+    type: str
+    aliases: ['aws_endpoint_url', 'endpoint' ]
+    env:
+      - name: AWS_URL
+      - name: EC2_URL
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_URL
+
+notes:
+  - B(Caution:) For modules, environment variables and configuration files are
+    read from the Ansible 'host' context and not the 'controller' context.
+    As such, files may need to be explicitly copied to the 'host'. For lookup
+    and connection plugins, environment variables and configuration files are
+    read from the Ansible 'controller' context and not the 'host' context.
+  - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
+    and other settings, such as the region, from its configuration files in the
+    Ansible 'host' context (typically C(~/.aws/credentials)).
+    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
+    for more information.
+"""
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
index 017652b58..839b6cff8 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
@@ -1,30 +1,16 @@
 # -*- coding: utf-8 -*-

-# Copyright: (c) 2015, Ansible, Inc
+# (c) 2022 Red Hat Inc.
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from .region import ModuleDocFragment as RegionFragment

+#
+# The amazon.aws.ec2 docs fragment has been deprecated,
+# please migrate to amazon.aws.region.modules.
+#

-class ModuleDocFragment(object):
-    # EC2 only documentation fragment
-    DOCUMENTATION = r'''
-options:
-  region:
-    description:
-      - The AWS region to use.
-      - For global services such as IAM, Route53 and CloudFront, I(region)
-        is ignored.
-      - The C(AWS_REGION) or C(EC2_REGION) environment variables may also
-        be used.
-      - See the Amazon AWS documentation for more information
-        U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
-      - The C(ec2_region) alias has been deprecated and will be removed in
-        a release after 2024-12-01
-      - Support for the C(EC2_REGION) environment variable has been
-        deprecated and will be removed in a release after 2024-12-01.
-    type: str
-    aliases: [ aws_region, ec2_region ]
-'''
+class ModuleDocFragment:
+    def __init__(self):
+        self.DOCUMENTATION = RegionFragment.MODULES
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/region.py
new file mode 100644
index 000000000..49592391c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/region.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment:
+    # Common configuration for all AWS services
+    # Note: If you're updating MODULES, PLUGINS probably needs updating too.
+
+    # Formatted for Modules
+    # - modules don't support 'env'
+    MODULES = r"""
+options:
+  region:
+    description:
+      - The AWS region to use.
+      - For global services such as IAM, Route53 and CloudFront, I(region)
+        is ignored.
+      - The C(AWS_REGION) or C(EC2_REGION) environment variables may also
+        be used.
+      - See the Amazon AWS documentation for more information
+        U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
+      - The C(ec2_region) alias has been deprecated and will be removed in
+        a release after 2024-12-01
+      - Support for the C(EC2_REGION) environment variable has been
+        deprecated and will be removed in a release after 2024-12-01.
+    type: str
+    aliases: [ aws_region, ec2_region ]
+"""
+
+    # Formatted for non-module plugins
+    # - modules don't support 'env'
+    PLUGINS = r"""
+options:
+  region:
+    description:
+      - The AWS region to use.
+      - See the Amazon AWS documentation for more information
+        U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
+    type: str
+    aliases: [ aws_region, ec2_region ]
+    env:
+      - name: AWS_REGION
+      - name: EC2_REGION
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources, when it is used for all connections'
+"""
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
index 9d381cb8a..afd29dedf 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
@@ -3,14 +3,10 @@
 # Copyright: (c) 2022, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
+class ModuleDocFragment:
     # Standard Tagging related parameters
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
   tags:
     description:
@@ -32,31 +28,9 @@ options:
     type: bool
     default: true
     required: false
-'''
+"""

-    # Some modules had a default of purge_tags=False, this was generally
-    # deprecated in release 4.0.0
-    DEPRECATED_PURGE = r'''
-options:
-  tags:
-    description:
-      - A dictionary representing the tags to be applied to the resource.
-      - If the I(tags) parameter is not set then tags will not be modified.
-    type: dict
-    required: false
-    aliases: ['resource_tags']
-  purge_tags:
-    description:
-      - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
-        from the resource to match exactly what is defined by I(tags) parameter.
-      - If the I(tags) parameter is not set then tags will not be modified, even
-        if I(purge_tags=True).
-      - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
-        modified. As such they will be ignored for the purposes of the
-        I(purge_tags) parameter. See the Amazon documentation for more information
-        U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
-      - The current default value of C(False) has been deprecated. The default
-        value will change to C(True) in release 5.0.0.
-    type: bool
-    required: false
-'''
+    # Modules and Plugins can (currently) use the same fragment
+    def __init__(self):
+        self.MODULES = self.DOCUMENTATION
+        self.PLUGINS = self.DOCUMENTATION
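With the DEPRECATED_PURGE variant removed above, purge_tags now defaults to true for every consumer of the fragment. A sketch of the resulting semantics, using amazon.aws.s3_bucket as an illustrative consumer (bucket name is a placeholder):

    - name: Enforce an exact tag set on a bucket
      amazon.aws.s3_bucket:
        name: my-example-bucket
        tags:
          Environment: dev
        # purge_tags defaults to true, so existing tags not listed here are removed;
        # keys beginning with aws: are reserved by Amazon and ignored by the purge
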
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
index f1d069b5b..8b9796b7f 100644
--- a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -1,17 +1,18 @@
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 name: aws_ec2
 short_description: EC2 inventory source
 extends_documentation_fragment:
   - inventory_cache
   - constructed
   - amazon.aws.boto3
-  - amazon.aws.aws_credentials
+  - amazon.aws.common.plugins
+  - amazon.aws.region.plugins
+  - amazon.aws.assume_role.plugins
 description:
   - Get inventory hosts from Amazon Web Services EC2.
   - "The inventory file is a YAML configuration file and must end with C(aws_ec2.{yml|yaml}). Example: C(my_inventory.aws_ec2.yml)."
@@ -21,14 +22,6 @@ notes:
 author:
   - Sloane Hertel (@s-hertel)
 options:
-  plugin:
-    description: Token that ensures this is a source file for the plugin.
-    required: True
-    choices: ['aws_ec2', 'amazon.aws.aws_ec2']
-  iam_role_arn:
-    description:
-      - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
-        credentials with enough privilege to perform the AssumeRole action.
   regions:
     description:
      - A list of regions in which to describe EC2 instances.
@@ -39,16 +32,17 @@ options:
   hostnames:
     description:
       - A list in order of precedence for hostname variables.
+      - The elements of the list can be a dict with the keys mentioned below or a string.
+      - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+      - If value provided does not exist in the above options, it will be used as a literal string.
+      - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
     type: list
-    elements: dict
+    elements: raw
     default: []
     suboptions:
       name:
        description:
          - Name of the host.
-          - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
-          - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
-          - If value provided does not exist in the above options, it will be used as a literal string.
        type: str
        required: True
      prefix:
@@ -142,35 +136,47 @@ options:
       - The suffix for host variables names coming from AWS.
     type: str
     version_added: 3.1.0
-'''
+  use_ssm_inventory:
+    description:
+      - Enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars.
+      - By leveraging the SSM inventory data, the I(use_ssm_inventory) option provides additional details and attributes
+        about the EC2 instances in your inventory. These details can include operating system information, installed software,
+        network configurations, and custom inventory attributes defined in SSM.
+    type: bool
+    default: False
+    version_added: 6.0.0
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Minimal example using environment vars or instance role credentials
 # Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
 plugin: amazon.aws.aws_ec2
 regions:
   - us-east-1

+---
+
 # Example using filters, ignoring permission errors, and specifying the hostname precedence
 plugin: amazon.aws.aws_ec2
 # The values for profile, access key, secret key and token can be hardcoded like:
-boto_profile: aws_profile
+profile: aws_profile
 # or you could use Jinja as:
-# boto_profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}"
+# profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}"
 # Populate inventory with instances in these regions
 regions:
   - us-east-1
   - us-east-2
 filters:
-  # All instances with their `Environment` tag set to `dev`
-  tag:Environment: dev
+  ## All instances with their `Environment` tag set to `dev`
+  # tag:Environment: dev
+
   # All dev and QA hosts
   tag:Environment:
     - dev
     - qa
   instance.group-id: sg-xxxxxxxx
 # Ignores 403 errors rather than failing
-strict_permissions: False
+strict_permissions: false
 # Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
 # inventory_hostname use compose (see example below).
 hostnames:
@@ -186,7 +192,9 @@ hostnames:
     prefix: 'aws'

 # Returns all the hostnames for a given instance
-allow_duplicated_hosts: False
+allow_duplicated_hosts: false
+
+---

 # Example using constructed features to create groups and set ansible_host
 plugin: amazon.aws.aws_ec2
@@ -194,7 +202,7 @@ regions:
   - us-east-1
   - us-west-1
 # keyed_groups may be used to create custom groups
-strict: False
+strict: false
 keyed_groups:
   # Add e.g. x86_64 hosts to an arch_x86_64 group
   - prefix: arch
@@ -224,23 +232,27 @@ compose:
   # (note: this does not modify inventory_hostname, which is set via I(hostnames))
   ansible_host: private_ip_address

+---
+
 # Example using include_filters and exclude_filters to compose the inventory.
 plugin: amazon.aws.aws_ec2
 regions:
   - us-east-1
   - us-west-1
 include_filters:
-- tag:Name:
-  - 'my_second_tag'
-- tag:Name:
-  - 'my_third_tag'
+  - tag:Name:
+      - 'my_second_tag'
+  - tag:Name:
+      - 'my_third_tag'
 exclude_filters:
-- tag:Name:
-  - 'my_first_tag'
+  - tag:Name:
+      - 'my_first_tag'
+
+---

 # Example using groups to assign the running hosts to a group based on vpc_id
 plugin: amazon.aws.aws_ec2
-boto_profile: aws_profile
+profile: aws_profile
 # Populate inventory with instances in these regions
 regions:
   - us-east-2
@@ -254,420 +266,353 @@ compose:
   ansible_host: public_dns_name
 groups:
   libvpc: vpc_id == 'vpc-####'

+---
+
 # Define prefix and suffix for host variables coming from AWS.
 plugin: amazon.aws.aws_ec2
 regions:
   - us-east-1
 hostvars_prefix: 'aws_'
 hostvars_suffix: '_ec2'
-'''
+"""

 import re

 try:
-    import boto3
     import botocore
 except ImportError:
     pass  # will be captured by imported HAS_BOTO3

-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native
 from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import missing_required_lib
-from ansible.plugins.inventory import BaseInventoryPlugin
-from ansible.plugins.inventory import Cacheable
-from ansible.plugins.inventory import Constructable
-from ansible.template import Templar
-
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase

 # The mappings give an array of keys to get from the filter name to the value
 # returned by boto3's EC2 describe_instances method.
 instance_meta_filter_to_boto_attr = {
-    'group-id': ('Groups', 'GroupId'),
-    'group-name': ('Groups', 'GroupName'),
-    'network-interface.attachment.instance-owner-id': ('OwnerId',),
-    'owner-id': ('OwnerId',),
-    'requester-id': ('RequesterId',),
-    'reservation-id': ('ReservationId',),
+    "group-id": ("Groups", "GroupId"),
+    "group-name": ("Groups", "GroupName"),
+    "network-interface.attachment.instance-owner-id": ("OwnerId",),
+    "owner-id": ("OwnerId",),
+    "requester-id": ("RequesterId",),
+    "reservation-id": ("ReservationId",),
 }

 instance_data_filter_to_boto_attr = {
-    'affinity': ('Placement', 'Affinity'),
-    'architecture': ('Architecture',),
-    'availability-zone': ('Placement', 'AvailabilityZone'),
-    'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
-    'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
-    'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
-    'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
-    'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
-    'client-token': ('ClientToken',),
-    'dns-name': ('PublicDnsName',),
-    'host-id': ('Placement', 'HostId'),
-    'hypervisor': ('Hypervisor',),
-    'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
-    'image-id': ('ImageId',),
-    'instance-id': ('InstanceId',),
-    'instance-lifecycle': ('InstanceLifecycle',),
-    'instance-state-code': ('State', 'Code'),
-    'instance-state-name': ('State', 'Name'),
-    'instance-type': ('InstanceType',),
-    'instance.group-id': ('SecurityGroups', 'GroupId'),
-    'instance.group-name': ('SecurityGroups', 'GroupName'),
-    'ip-address': ('PublicIpAddress',),
-    'kernel-id': ('KernelId',),
-    'key-name': ('KeyName',),
-    'launch-index': ('AmiLaunchIndex',),
-    'launch-time': ('LaunchTime',),
-    'monitoring-state': ('Monitoring', 'State'),
-    'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
-    'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
-    'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
-    'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
-    'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
-    'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
-    'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
-    'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
-    'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
-    'network-interface.attachment.instance-id': ('InstanceId',),
-    'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
-    'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
-    'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
-    'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
-    'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
-    'network-interface.description': ('NetworkInterfaces', 'Description'),
-    'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
-    'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
-    'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
-    'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
-    'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
-    'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
-    'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+    "affinity": ("Placement", "Affinity"),
+    "architecture": ("Architecture",),
+    "availability-zone": ("Placement", "AvailabilityZone"),
+    "block-device-mapping.attach-time": ("BlockDeviceMappings", "Ebs", "AttachTime"),
+    "block-device-mapping.delete-on-termination": ("BlockDeviceMappings", "Ebs", "DeleteOnTermination"),
+    "block-device-mapping.device-name": ("BlockDeviceMappings", "DeviceName"),
+    "block-device-mapping.status": ("BlockDeviceMappings", "Ebs", "Status"),
+    "block-device-mapping.volume-id": ("BlockDeviceMappings", "Ebs", "VolumeId"),
+    "client-token": ("ClientToken",),
+    "dns-name": ("PublicDnsName",),
+    "host-id": ("Placement", "HostId"),
+    "hypervisor": ("Hypervisor",),
+    "iam-instance-profile.arn": ("IamInstanceProfile", "Arn"),
+    "image-id": ("ImageId",),
+    "instance-id": ("InstanceId",),
+    "instance-lifecycle": ("InstanceLifecycle",),
+    "instance-state-code": ("State", "Code"),
+    "instance-state-name": ("State", "Name"),
+    "instance-type": ("InstanceType",),
+    "instance.group-id": ("SecurityGroups", "GroupId"),
+    "instance.group-name": ("SecurityGroups", "GroupName"),
+    "ip-address": ("PublicIpAddress",),
+    "kernel-id": ("KernelId",),
+    "key-name": ("KeyName",),
+    "launch-index": ("AmiLaunchIndex",),
+    "launch-time": ("LaunchTime",),
+    "monitoring-state": ("Monitoring", "State"),
+    "network-interface.addresses.private-ip-address": ("NetworkInterfaces", "PrivateIpAddress"),
+    "network-interface.addresses.primary": ("NetworkInterfaces", "PrivateIpAddresses", "Primary"),
+    "network-interface.addresses.association.public-ip": (
+        "NetworkInterfaces",
+        "PrivateIpAddresses",
+        "Association",
+        "PublicIp",
+    ),
+    "network-interface.addresses.association.ip-owner-id": (
+        "NetworkInterfaces",
+        "PrivateIpAddresses",
+        "Association",
+        "IpOwnerId",
+    ),
+    "network-interface.association.public-ip": ("NetworkInterfaces", "Association", "PublicIp"),
+    "network-interface.association.ip-owner-id": ("NetworkInterfaces", "Association", "IpOwnerId"),
+    "network-interface.association.allocation-id": ("ElasticGpuAssociations", "ElasticGpuId"),
+    "network-interface.association.association-id": ("ElasticGpuAssociations", "ElasticGpuAssociationId"),
+    "network-interface.attachment.attachment-id": ("NetworkInterfaces", "Attachment", "AttachmentId"),
+    "network-interface.attachment.instance-id": ("InstanceId",),
+    "network-interface.attachment.device-index": ("NetworkInterfaces", "Attachment", "DeviceIndex"),
+    "network-interface.attachment.status": ("NetworkInterfaces", "Attachment", "Status"),
+    "network-interface.attachment.attach-time": ("NetworkInterfaces", "Attachment", "AttachTime"),
+    "network-interface.attachment.delete-on-termination": ("NetworkInterfaces", "Attachment", "DeleteOnTermination"),
+    "network-interface.availability-zone": ("Placement", "AvailabilityZone"),
+    "network-interface.description": ("NetworkInterfaces", "Description"),
+    "network-interface.group-id": ("NetworkInterfaces", "Groups", "GroupId"),
+    "network-interface.group-name": ("NetworkInterfaces", "Groups", "GroupName"),
+    "network-interface.ipv6-addresses.ipv6-address": ("NetworkInterfaces", "Ipv6Addresses", "Ipv6Address"),
+    "network-interface.mac-address": ("NetworkInterfaces", "MacAddress"),
+    "network-interface.network-interface-id": ("NetworkInterfaces", "NetworkInterfaceId"),
+    "network-interface.owner-id": ("NetworkInterfaces", "OwnerId"),
+    "network-interface.private-dns-name": ("NetworkInterfaces", "PrivateDnsName"),
     # 'network-interface.requester-id': (),
-    'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
-    'network-interface.status': ('NetworkInterfaces', 'Status'),
-    'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
-    'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
-    'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
-    'placement-group-name': ('Placement', 'GroupName'),
-    'platform': ('Platform',),
-    'private-dns-name': ('PrivateDnsName',),
-    'private-ip-address': ('PrivateIpAddress',),
-    'product-code': ('ProductCodes', 'ProductCodeId'),
-    'product-code.type': ('ProductCodes', 'ProductCodeType'),
-    'ramdisk-id': ('RamdiskId',),
-    'reason': ('StateTransitionReason',),
-    'root-device-name': ('RootDeviceName',),
-    'root-device-type': ('RootDeviceType',),
-    'source-dest-check': ('SourceDestCheck',),
-    'spot-instance-request-id': ('SpotInstanceRequestId',),
-    'state-reason-code': ('StateReason', 'Code'),
-    'state-reason-message': ('StateReason', 'Message'),
-    'subnet-id': ('SubnetId',),
-    'tag': ('Tags',),
-    'tag-key': ('Tags',),
-    'tag-value': ('Tags',),
-    'tenancy': ('Placement', 'Tenancy'),
-    'virtualization-type': ('VirtualizationType',),
-    'vpc-id': ('VpcId',),
+    "network-interface.requester-managed": ("NetworkInterfaces", "Association", "IpOwnerId"),
+    "network-interface.status": ("NetworkInterfaces", "Status"),
+    "network-interface.source-dest-check": ("NetworkInterfaces", "SourceDestCheck"),
+    "network-interface.subnet-id": ("NetworkInterfaces", "SubnetId"),
+    "network-interface.vpc-id": ("NetworkInterfaces", "VpcId"),
+    "placement-group-name": ("Placement", "GroupName"),
+    "platform": ("Platform",),
+    "private-dns-name": ("PrivateDnsName",),
+    "private-ip-address": ("PrivateIpAddress",),
+    "product-code": ("ProductCodes", "ProductCodeId"),
+    "product-code.type": ("ProductCodes", "ProductCodeType"),
+    "ramdisk-id": ("RamdiskId",),
+    "reason": ("StateTransitionReason",),
+    "root-device-name": ("RootDeviceName",),
+    "root-device-type": ("RootDeviceType",),
+    "source-dest-check": ("SourceDestCheck",),
+    "spot-instance-request-id": ("SpotInstanceRequestId",),
+    "state-reason-code": ("StateReason", "Code"),
+    "state-reason-message": ("StateReason", "Message"),
+    "subnet-id": ("SubnetId",),
+    "tag": ("Tags",),
+    "tag-key": ("Tags",),
+    "tag-value": ("Tags",),
+    "tenancy": ("Placement", "Tenancy"),
+    "virtualization-type": ("VirtualizationType",),
+    "vpc-id": ("VpcId",),
 }


-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+def _get_tag_hostname(preference, instance):
+    tag_hostnames = preference.split("tag:", 1)[1]
+    if "," in tag_hostnames:
+        tag_hostnames = tag_hostnames.split(",")
+    else:
+        tag_hostnames = [tag_hostnames]
+
+    tags = boto3_tag_list_to_ansible_dict(instance.get("Tags", []))
+    tag_values = []
+    for v in tag_hostnames:
+        if "=" in v:
+            tag_name, tag_value = v.split("=")
+            if tags.get(tag_name) == tag_value:
+                tag_values.append(to_text(tag_name) + "_" + to_text(tag_value))
+        else:
+            tag_value = tags.get(v)
+            if tag_value:
+                tag_values.append(to_text(tag_value))
+    return tag_values

-    NAME = 'amazon.aws.aws_ec2'

-    def __init__(self):
-        super(InventoryModule, self).__init__()
-
-        self.group_prefix = 'aws_ec2_'
-
-        # credentials
-        self.boto_profile = None
-        self.aws_secret_access_key = None
-        self.aws_access_key_id = None
-        self.aws_security_token = None
-        self.iam_role_arn = None
-
-    def _compile_values(self, obj, attr):
-        '''
-            :param obj: A list or dict of instance attributes
-            :param attr: A key
-            :return The value(s) found via the attr
-        '''
-        if obj is None:
-            return
-
-        temp_obj = []
-
-        if isinstance(obj, list) or isinstance(obj, tuple):
-            for each in obj:
-                value = self._compile_values(each, attr)
-                if value:
-                    temp_obj.append(value)
-        else:
-            temp_obj = obj.get(attr)
+def _prepare_host_vars(
+    original_host_vars,
+    hostvars_prefix=None,
+    hostvars_suffix=None,
+    use_contrib_script_compatible_ec2_tag_keys=False,
+):
+    host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=["Tags"])
+    host_vars["tags"] = boto3_tag_list_to_ansible_dict(original_host_vars.get("Tags", []))

-        has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
-        if has_indexes and len(temp_obj) == 1:
-            return temp_obj[0]
+    # Allow easier grouping by region
+    host_vars["placement"]["region"] = host_vars["placement"]["availability_zone"][:-1]

-        return temp_obj
+    if use_contrib_script_compatible_ec2_tag_keys:
+        for k, v in host_vars["tags"].items():
+            host_vars[f"ec2_tag_{k}"] = v

-    def _get_boto_attr_chain(self, filter_name, instance):
-        '''
-            :param filter_name: The filter
-            :param instance: instance dict returned by boto3 ec2 describe_instances()
-        '''
-        allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+    if hostvars_prefix or hostvars_suffix:
+        for hostvar, hostval in host_vars.copy().items():
+            del host_vars[hostvar]
+            if hostvars_prefix:
+                hostvar = hostvars_prefix + hostvar
+            if hostvars_suffix:
+                hostvar = hostvar + hostvars_suffix
+            host_vars[hostvar] = hostval

-        # If filter not in allow_filters -> use it as a literal string
-        if filter_name not in allowed_filters:
-            return filter_name
+    return host_vars

-        if filter_name in instance_data_filter_to_boto_attr:
-            boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
-        else:
-            boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
-
-        instance_value = instance
-        for attribute in boto_attr_list:
-            instance_value = self._compile_values(instance_value, attribute)
-        return instance_value
-
-    def _get_credentials(self):
-        '''
-            :return A dictionary of boto client credentials
-        '''
-        boto_params = {}
-        for credential in (('aws_access_key_id', self.aws_access_key_id),
-                           ('aws_secret_access_key', self.aws_secret_access_key),
-                           ('aws_session_token', self.aws_security_token)):
-            if credential[1]:
-                boto_params[credential[0]] = credential[1]
-
-        return boto_params
-
-    def _get_connection(self, credentials, region='us-east-1'):
-        try:
-            connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
-        except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-            if self.boto_profile:
-                try:
-                    connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
-                except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-            else:
-                raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-        return connection

-    def _boto3_assume_role(self, credentials, region=None):
-        """
-        Assume an IAM role passed by iam_role_arn parameter
+def _compile_values(obj, attr):
+    """
+    :param obj: A list or dict of instance attributes
+    :param attr: A key
+    :return The value(s) found via the attr
+    """
+    if obj is None:
+        return

-        :return: a dict containing the credentials of the assumed role
-        """
+    temp_obj = []

-        iam_role_arn = self.iam_role_arn
+    if isinstance(obj, list) or isinstance(obj, tuple):
+        for each in obj:
+            value = _compile_values(each, attr)
+            if value:
+                temp_obj.append(value)
+    else:
+        temp_obj = obj.get(attr)

-        try:
-            sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
-            sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
-            return dict(
-                aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
-                aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
-                aws_session_token=sts_session['Credentials']['SessionToken']
-            )
-        except botocore.exceptions.ClientError as e:
-            raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+    has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
+    if has_indexes and len(temp_obj) == 1:
+        return temp_obj[0]

-    def _boto3_conn(self, regions):
-        '''
-            :param regions: A list of regions to create a boto3 client
+    return temp_obj

-            Generator that yields a boto3 client and the region
-        '''
-        credentials = self._get_credentials()
-        iam_role_arn = self.iam_role_arn

+def _get_boto_attr_chain(filter_name, instance):
+    """
+    :param filter_name: The filter
+    :param instance: instance dict returned by boto3 ec2 describe_instances()
+    """
+    allowed_filters = sorted(
+        list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys())
+    )

-        if not regions:
-            try:
-                # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
-                client = self._get_connection(credentials)
-                resp = client.describe_regions()
-                regions = [x['RegionName'] for x in resp.get('Regions', [])]
-            except botocore.exceptions.NoRegionError:
-                # above seems to fail depending on boto3 version, ignore and lets try something else
-                pass
-            except is_boto3_error_code('UnauthorizedOperation') as e:  # pylint: disable=duplicate-except
-                if iam_role_arn is not None:
-                    try:
-                        # Describe regions assuming arn role
-                        assumed_credentials = self._boto3_assume_role(credentials)
-                        client = self._get_connection(assumed_credentials)
-                        resp = client.describe_regions()
-                        regions = [x['RegionName'] for x in resp.get('Regions', [])]
-                    except botocore.exceptions.NoRegionError:
-                        # above seems to fail depending on boto3 version, ignore and lets try something else
-                        pass
-                else:
-                    raise AnsibleError("Unauthorized operation: %s" % to_native(e))
-
-        # fallback to local list hardcoded in boto3 if still no regions
-        if not regions:
-            session = boto3.Session()
-            regions = session.get_available_regions('ec2')
-
-        # I give up, now you MUST give me regions
-        if not regions:
-            raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
-
-        for region in regions:
-            connection = self._get_connection(credentials, region)
-            try:
-                if iam_role_arn is not None:
-                    assumed_credentials = self._boto3_assume_role(credentials, region)
-                else:
-                    assumed_credentials = credentials
-                connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
-            except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-                if self.boto_profile:
-                    try:
-                        connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
-                    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-                        raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-                else:
-                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-            yield connection, region
+    # If filter not in allow_filters -> use it as a literal string
+    if filter_name not in allowed_filters:
+        return filter_name
+
+    if filter_name in instance_data_filter_to_boto_attr:
+        boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+    else:
+        boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+    instance_value = instance
+    for attribute in boto_attr_list:
+        instance_value = _compile_values(instance_value, attribute)
+    return instance_value
+
+
+def _describe_ec2_instances(connection, filters):
+    paginator = connection.get_paginator("describe_instances")
+    return paginator.paginate(Filters=filters).build_full_result()
+
+
+def _get_ssm_information(client, filters):
+    paginator = client.get_paginator("get_inventory")
+    return paginator.paginate(Filters=filters).build_full_result()
+
+
+class InventoryModule(AWSInventoryBase):
+    NAME = "amazon.aws.aws_ec2"
+    INVENTORY_FILE_SUFFIXES = ("aws_ec2.yml", "aws_ec2.yaml")
+
+    def __init__(self):
+        super().__init__()
+
+        self.group_prefix = "aws_ec2_"

     def _get_instances_by_region(self, regions, filters, strict_permissions):
-        '''
-           :param regions: a list of regions in which to describe instances
-           :param filters: a list of boto3 filter dictionaries
-           :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
-           :return A list of instance dictionaries
-        '''
+        """
+        :param regions: a list of regions in which to describe instances
+        :param filters: a list of boto3 filter dictionaries
+        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+        :return A list of instance dictionaries
+        """
         all_instances = []
+        # By default find non-terminated/terminating instances
+        if not any(f["Name"] == "instance-state-name" for f in filters):
+            filters.append({"Name": "instance-state-name", "Values": ["running", "pending", "stopping", "stopped"]})

-        for connection, _region in self._boto3_conn(regions):
+        for connection, _region in self.all_clients("ec2"):
             try:
-                # By default find non-terminated/terminating instances
-                if not any(f['Name'] == 'instance-state-name' for f in filters):
-                    filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
-                paginator = connection.get_paginator('describe_instances')
-                reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+                reservations = _describe_ec2_instances(connection, filters).get("Reservations")
                 instances = []
                 for r in reservations:
-                    new_instances = r['Instances']
+                    new_instances = r["Instances"]
+                    reservation_details = {
+                        "OwnerId": r["OwnerId"],
+                        "RequesterId": r.get("RequesterId", ""),
+                        "ReservationId": r["ReservationId"],
+                    }
                     for instance in new_instances:
-                        instance.update(self._get_reservation_details(r))
+                        instance.update(reservation_details)
                     instances.extend(new_instances)
             except botocore.exceptions.ClientError as e:
-                if
e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
-                    instances = []
-                else:
-                    raise AnsibleError("Failed to describe instances: %s" % to_native(e))
-            except botocore.exceptions.BotoCoreError as e:
-                raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+            except is_boto3_error_code("UnauthorizedOperation") as e:
+                if not strict_permissions:
+                    continue
+                self.fail_aws("Failed to describe instances", exception=e)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                self.fail_aws("Failed to describe instances", exception=e)

             all_instances.extend(instances)

         return all_instances

-    def _get_reservation_details(self, reservation):
-        return {
-            'OwnerId': reservation['OwnerId'],
-            'RequesterId': reservation.get('RequesterId', ''),
-            'ReservationId': reservation['ReservationId']
-        }
-
-    @classmethod
-    def _get_tag_hostname(cls, preference, instance):
-        tag_hostnames = preference.split('tag:', 1)[1]
-        if ',' in tag_hostnames:
-            tag_hostnames = tag_hostnames.split(',')
-        else:
-            tag_hostnames = [tag_hostnames]
-
-        tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
-        tag_values = []
-        for v in tag_hostnames:
-            if '=' in v:
-                tag_name, tag_value = v.split('=')
-                if tags.get(tag_name) == tag_value:
-                    tag_values.append(to_text(tag_name) + "_" + to_text(tag_value))
-            else:
-                tag_value = tags.get(v)
-                if tag_value:
-                    tag_values.append(to_text(tag_value))
-        return tag_values
-
     def _sanitize_hostname(self, hostname):
-        if ':' in to_text(hostname):
+        if ":" in to_text(hostname):
             return self._sanitize_group_name(to_text(hostname))
         else:
             return to_text(hostname)

     def _get_preferred_hostname(self, instance, hostnames):
-        '''
-        :param instance: an instance dict returned by boto3 ec2 describe_instances()
-        :param hostnames: a list of hostname destination variables in order of preference
-        :return the preferred identifer for the host
-        '''
+        """
+        :param instance: an instance dict returned by boto3 ec2 describe_instances()
+        :param hostnames: a list of hostname destination variables in order of preference
+        :return the preferred identifier for the host
+        """
         if not hostnames:
-            hostnames = ['dns-name', 'private-dns-name']
+            hostnames = ["dns-name", "private-dns-name"]

         hostname = None
         for preference in hostnames:
             if isinstance(preference, dict):
-                if 'name' not in preference:
-                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+                if "name" not in preference:
+                    self.fail_aws("A 'name' key must be defined in a hostnames dictionary.")
                 hostname = self._get_preferred_hostname(instance, [preference["name"]])
-                hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
+                hostname_from_prefix = None
+                if "prefix" in preference:
+                    hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
                 separator = preference.get("separator", "_")
-                if hostname and hostname_from_prefix and 'prefix' in preference:
+                if hostname and hostname_from_prefix and "prefix" in preference:
                     hostname = hostname_from_prefix + separator + hostname
-            elif preference.startswith('tag:'):
-                tags = self._get_tag_hostname(preference, instance)
+            elif preference.startswith("tag:"):
+                tags = _get_tag_hostname(preference, instance)
                 hostname = tags[0] if tags else None
             else:
-                hostname = self._get_boto_attr_chain(preference, instance)
+                hostname = _get_boto_attr_chain(preference, instance)
             if hostname:
                 break
         if hostname:
             return self._sanitize_hostname(hostname)

-    def get_all_hostnames(self,
instance, hostnames):
-        '''
-        :param instance: an instance dict returned by boto3 ec2 describe_instances()
-        :param hostnames: a list of hostname destination variables
-        :return all the candidats matching the expectation
-        '''
+    def _get_all_hostnames(self, instance, hostnames):
+        """
+        :param instance: an instance dict returned by boto3 ec2 describe_instances()
+        :param hostnames: a list of hostname destination variables
+        :return all the candidates matching the expectation
+        """
         if not hostnames:
-            hostnames = ['dns-name', 'private-dns-name']
+            hostnames = ["dns-name", "private-dns-name"]

         hostname = None
         hostname_list = []
         for preference in hostnames:
             if isinstance(preference, dict):
-                if 'name' not in preference:
-                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
-                hostname = self.get_all_hostnames(instance, [preference["name"]])
-                hostname_from_prefix = self.get_all_hostnames(instance, [preference["prefix"]])
+                if "name" not in preference:
+                    self.fail_aws("A 'name' key must be defined in a hostnames dictionary.")
+                hostname = self._get_all_hostnames(instance, [preference["name"]])
+                hostname_from_prefix = None
+                if "prefix" in preference:
+                    hostname_from_prefix = self._get_all_hostnames(instance, [preference["prefix"]])
                 separator = preference.get("separator", "_")
-                if hostname and hostname_from_prefix and 'prefix' in preference:
+                if hostname and hostname_from_prefix and "prefix" in preference:
                     hostname = hostname_from_prefix[0] + separator + hostname[0]
-            elif preference.startswith('tag:'):
-                hostname = self._get_tag_hostname(preference, instance)
+            elif preference.startswith("tag:"):
+                hostname = _get_tag_hostname(preference, instance)
             else:
-                hostname = self._get_boto_attr_chain(preference, instance)
+                hostname = _get_boto_attr_chain(preference, instance)
             if hostname:
                 if isinstance(hostname, list):
@@ -678,38 +623,74 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

         return hostname_list

-    def _query(self, regions, include_filters, exclude_filters, strict_permissions):
-        '''
-        :param regions: a list of regions to query
-        :param include_filters: a list of boto3 filter dictionaries
-        :param exclude_filters: a list of boto3 filter dictionaries
-        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+    def _query(self, regions, include_filters, exclude_filters, strict_permissions, use_ssm_inventory):
+        """
+        :param regions: a list of regions to query
+        :param include_filters: a list of boto3 filter dictionaries
+        :param exclude_filters: a list of boto3 filter dictionaries
+        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes

-        '''
+        """
         instances = []
         ids_to_ignore = []
         for filter in exclude_filters:
             for i in self._get_instances_by_region(
-                    regions,
-                    ansible_dict_to_boto3_filter_list(filter),
-                    strict_permissions):
-                ids_to_ignore.append(i['InstanceId'])
+                regions,
+                ansible_dict_to_boto3_filter_list(filter),
+                strict_permissions,
+            ):
+                ids_to_ignore.append(i["InstanceId"])
         for filter in include_filters:
             for i in self._get_instances_by_region(
-                    regions,
-                    ansible_dict_to_boto3_filter_list(filter),
-                    strict_permissions):
-                if i['InstanceId'] not in ids_to_ignore:
+                regions,
+                ansible_dict_to_boto3_filter_list(filter),
+                strict_permissions,
+            ):
+                if i["InstanceId"] not in ids_to_ignore:
                     instances.append(i)
-                    ids_to_ignore.append(i['InstanceId'])
-
-        instances = sorted(instances, key=lambda x: x['InstanceId'])
-
-        return {'aws_ec2': instances}
-
-    def
_populate(self, groups, hostnames, allow_duplicated_hosts=False, - hostvars_prefix=None, hostvars_suffix=None, - use_contrib_script_compatible_ec2_tag_keys=False): + ids_to_ignore.append(i["InstanceId"]) + + instances = sorted(instances, key=lambda x: x["InstanceId"]) + + if use_ssm_inventory and instances: + for connection, _region in self.all_clients("ssm"): + self._add_ssm_information(connection, instances) + + return {"aws_ec2": instances} + + def _add_ssm_information(self, connection, instances): + instance_ids = [x["InstanceId"] for x in instances] + result = self._get_multiple_ssm_inventories(connection, instance_ids) + for entity in result.get("Entities", []): + for x in instances: + if x["InstanceId"] == entity["Id"]: + content = entity.get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", []) + if content: + x["SsmInventory"] = content[0] + break + + def _get_multiple_ssm_inventories(self, connection, instance_ids): + result = {} + # SSM inventory filters Values list can contain a maximum of 40 items so we need to retrieve 40 at a time + # https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_InventoryFilter.html + while len(instance_ids) > 40: + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": instance_ids[:40]}] + result.update(_get_ssm_information(connection, filters)) + instance_ids = instance_ids[40:] + if instance_ids: + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": instance_ids}] + result.update(_get_ssm_information(connection, filters)) + return result + + def _populate( + self, + groups, + hostnames, + allow_duplicated_hosts=False, + hostvars_prefix=None, + hostvars_suffix=None, + use_contrib_script_compatible_ec2_tag_keys=False, + ): for group in groups: group = self.inventory.add_group(group) self._add_hosts( @@ -719,190 +700,120 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): allow_duplicated_hosts=allow_duplicated_hosts, hostvars_prefix=hostvars_prefix, hostvars_suffix=hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys) - self.inventory.add_child('all', group) - - @classmethod - def prepare_host_vars(cls, original_host_vars, hostvars_prefix=None, hostvars_suffix=None, - use_contrib_script_compatible_ec2_tag_keys=False): - host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=['Tags']) - host_vars['tags'] = boto3_tag_list_to_ansible_dict(original_host_vars.get('Tags', [])) - - # Allow easier grouping by region - host_vars['placement']['region'] = host_vars['placement']['availability_zone'][:-1] - - if use_contrib_script_compatible_ec2_tag_keys: - for k, v in host_vars['tags'].items(): - host_vars["ec2_tag_%s" % k] = v - - if hostvars_prefix or hostvars_suffix: - for hostvar, hostval in host_vars.copy().items(): - del host_vars[hostvar] - if hostvars_prefix: - hostvar = hostvars_prefix + hostvar - if hostvars_suffix: - hostvar = hostvar + hostvars_suffix - host_vars[hostvar] = hostval - - return host_vars - - def iter_entry(self, hosts, hostnames, allow_duplicated_hosts=False, hostvars_prefix=None, - hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False): + use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys, + ) + self.inventory.add_child("all", group) + + def iter_entry( + self, + hosts, + hostnames, + allow_duplicated_hosts=False, + hostvars_prefix=None, + hostvars_suffix=None, + use_contrib_script_compatible_ec2_tag_keys=False, + ): for host in hosts: if 
allow_duplicated_hosts: - hostname_list = self.get_all_hostnames(host, hostnames) + hostname_list = self._get_all_hostnames(host, hostnames) else: hostname_list = [self._get_preferred_hostname(host, hostnames)] if not hostname_list or hostname_list[0] is None: continue - host_vars = self.prepare_host_vars( + host_vars = _prepare_host_vars( host, hostvars_prefix, hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys) + use_contrib_script_compatible_ec2_tag_keys, + ) for name in hostname_list: yield to_text(name), host_vars - def _add_hosts(self, hosts, group, hostnames, allow_duplicated_hosts=False, - hostvars_prefix=None, hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False): - ''' - :param hosts: a list of hosts to be added to a group - :param group: the name of the group to which the hosts belong - :param hostnames: a list of hostname destination variables in order of preference - :param bool allow_duplicated_hosts: if true, accept same host with different names - :param str hostvars_prefix: starts the hostvars variable name with this prefix - :param str hostvars_suffix: ends the hostvars variable name with this suffix - :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system - ''' + def _add_hosts( + self, + hosts, + group, + hostnames, + allow_duplicated_hosts=False, + hostvars_prefix=None, + hostvars_suffix=None, + use_contrib_script_compatible_ec2_tag_keys=False, + ): + """ + :param hosts: a list of hosts to be added to a group + :param group: the name of the group to which the hosts belong + :param hostnames: a list of hostname destination variables in order of preference + :param bool allow_duplicated_hosts: if true, accept same host with different names + :param str hostvars_prefix: starts the hostvars variable name with this prefix + :param str hostvars_suffix: ends the hostvars variable name with this suffix + :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system + """ for name, host_vars in self.iter_entry( - hosts, hostnames, - allow_duplicated_hosts=allow_duplicated_hosts, - hostvars_prefix=hostvars_prefix, - hostvars_suffix=hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys): + hosts, + hostnames, + allow_duplicated_hosts=allow_duplicated_hosts, + hostvars_prefix=hostvars_prefix, + hostvars_suffix=hostvars_suffix, + use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys, + ): self.inventory.add_host(name, group=group) for k, v in host_vars.items(): self.inventory.set_variable(name, k, v) # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") # Composed variables - self._set_composite_vars(self.get_option('compose'), host_vars, name, strict=strict) + self._set_composite_vars(self.get_option("compose"), host_vars, name, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host_vars, name, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host_vars, name, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, name, strict=strict) - - def _set_credentials(self, loader): - ''' - :param config_data: contents of the inventory config file - ''' - - t = 
Templar(loader=loader) - credentials = {} - - for credential_type in ['aws_profile', 'aws_access_key', 'aws_secret_key', 'aws_security_token', 'iam_role_arn']: - if t.is_template(self.get_option(credential_type)): - credentials[credential_type] = t.template(variable=self.get_option(credential_type), disable_lookups=False) - else: - credentials[credential_type] = self.get_option(credential_type) - - self.boto_profile = credentials['aws_profile'] - self.aws_access_key_id = credentials['aws_access_key'] - self.aws_secret_access_key = credentials['aws_secret_key'] - self.aws_security_token = credentials['aws_security_token'] - self.iam_role_arn = credentials['iam_role_arn'] - - if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): - session = botocore.session.get_session() - try: - credentials = session.get_credentials().get_frozen_credentials() - except AttributeError: - pass - else: - self.aws_access_key_id = credentials.access_key - self.aws_secret_access_key = credentials.secret_key - self.aws_security_token = credentials.token - - if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): - raise AnsibleError("Insufficient boto credentials found. Please provide them in your " - "inventory configuration file or set them as environment variables.") - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')): - return True - self.display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'") - return False + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host_vars, name, strict=strict) def build_include_filters(self): - if self.get_option('filters'): - return [self.get_option('filters')] + self.get_option('include_filters') - elif self.get_option('include_filters'): - return self.get_option('include_filters') - else: # no filter - return [{}] + result = self.get_option("include_filters") + if self.get_option("filters"): + result = [self.get_option("filters")] + result + return result or [{}] def parse(self, inventory, loader, path, cache=True): + super().parse(inventory, loader, path, cache=cache) - super(InventoryModule, self).parse(inventory, loader, path) - - if not HAS_BOTO3: - raise AnsibleError(missing_required_lib('botocore and boto3')) - - self._read_config_data(path) - - if self.get_option('use_contrib_script_compatible_sanitization'): + if self.get_option("use_contrib_script_compatible_sanitization"): self._sanitize_group_name = self._legacy_script_compatible_group_sanitization - self._set_credentials(loader) - # get user specifications - regions = self.get_option('regions') + regions = self.get_option("regions") include_filters = self.build_include_filters() - exclude_filters = self.get_option('exclude_filters') - hostnames = self.get_option('hostnames') - strict_permissions = self.get_option('strict_permissions') - allow_duplicated_hosts = self.get_option('allow_duplicated_hosts') + exclude_filters = self.get_option("exclude_filters") + hostnames = self.get_option("hostnames") + strict_permissions = self.get_option("strict_permissions") + allow_duplicated_hosts = self.get_option("allow_duplicated_hosts") hostvars_prefix = self.get_option("hostvars_prefix") hostvars_suffix = self.get_option("hostvars_suffix") - 
use_contrib_script_compatible_ec2_tag_keys = self.get_option('use_contrib_script_compatible_ec2_tag_keys') + use_contrib_script_compatible_ec2_tag_keys = self.get_option("use_contrib_script_compatible_ec2_tag_keys") + use_ssm_inventory = self.get_option("use_ssm_inventory") - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = self.get_option('cache') + if not all(isinstance(element, (dict, str)) for element in hostnames): + self.fail_aws("Hostnames should be a list of dict and str.") - if self.get_option('include_extra_api_calls'): + if self.get_option("include_extra_api_calls"): self.display.deprecate( - "The include_extra_api_calls option has been deprecated " - " and will be removed in release 6.0.0.", - date='2024-09-01', collection_name='amazon.aws') + "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.", + date="2024-09-01", + collection_name="amazon.aws", + ) - # Generate inventory - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True + result_was_cached, results = self.get_cached_result(path, cache) - if not cache or cache_needs_update: - results = self._query(regions, include_filters, exclude_filters, strict_permissions) + if not result_was_cached: + results = self._query(regions, include_filters, exclude_filters, strict_permissions, use_ssm_inventory) self._populate( results, @@ -910,17 +821,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): allow_duplicated_hosts=allow_duplicated_hosts, hostvars_prefix=hostvars_prefix, hostvars_suffix=hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys) + use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys, + ) - # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = results + self.update_cached_result(path, cache, results) @staticmethod def _legacy_script_compatible_group_sanitization(name): - # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python regex = re.compile(r"[^A-Za-z0-9\_\-]") - return regex.sub('_', name) + return regex.sub("_", name) diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py index 02f86073a..430329c7e 100644 --- a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py +++ b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py @@ -1,10 +1,9 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: aws_rds short_description: RDS instance inventory source description: @@ -39,10 +38,6 @@ options: default: - creating - available - iam_role_arn: - description: - - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide - AWS credentials with enough privilege to perform the AssumeRole action. 
hostvars_prefix: description: - The prefix for host variables names coming from AWS. @@ -59,12 +54,14 @@ extends_documentation_fragment: - inventory_cache - constructed - amazon.aws.boto3 - - amazon.aws.aws_credentials + - amazon.aws.common.plugins + - amazon.aws.region.plugins + - amazon.aws.assume_role.plugins author: - Sloane Hertel (@s-hertel) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" plugin: aws_rds regions: - us-east-1 @@ -78,221 +75,146 @@ keyed_groups: - key: region hostvars_prefix: aws_ hostvars_suffix: _rds -''' +""" try: - import boto3 import botocore except ImportError: pass # will be captured by imported HAS_BOTO3 from ansible.errors import AnsibleError from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.plugins.inventory import Cacheable -from ansible.plugins.inventory import Constructable +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): +def _find_hosts_with_valid_statuses(hosts, statuses): + if "all" in statuses: + return hosts + valid_hosts = [] + for host in hosts: + if host.get("DBInstanceStatus") in statuses: + valid_hosts.append(host) + elif host.get("Status") in statuses: + valid_hosts.append(host) + return valid_hosts - NAME = 'amazon.aws.aws_rds' - def __init__(self): - super(InventoryModule, self).__init__() - self.credentials = {} - self.boto_profile = None - self.iam_role_arn = None +def _get_rds_hostname(host): + if host.get("DBInstanceIdentifier"): + return host["DBInstanceIdentifier"] + else: + return host["DBClusterIdentifier"] + + +def _add_tags_for_rds_hosts(connection, hosts, strict): + for host in hosts: + if "DBInstanceArn" in host: + resource_arn = host["DBInstanceArn"] + else: + resource_arn = host["DBClusterArn"] - def _get_connection(self, credentials, region='us-east-1'): try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - if self.boto_profile: - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + tags = connection.list_tags_for_resource(ResourceName=resource_arn)["TagList"] + except is_boto3_error_code("AccessDenied") as e: + if not strict: + tags = [] else: - raise 
AnsibleError("Insufficient credentials found: %s" % to_native(e)) - return connection - - def _boto3_assume_role(self, credentials, region): - """ - Assume an IAM role passed by iam_role_arn parameter - :return: a dict containing the credentials of the assumed role - """ + raise e + host["Tags"] = tags - iam_role_arn = self.iam_role_arn +def describe_resource_with_tags(func): + def describe_wrapper(connection, filters, strict=False): try: - sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials) - sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory') - return dict( - aws_access_key_id=sts_session['Credentials']['AccessKeyId'], - aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'], - aws_session_token=sts_session['Credentials']['SessionToken'] - ) - except botocore.exceptions.ClientError as e: - raise AnsibleError("Unable to assume IAM role: %s" % to_native(e)) - - def _boto3_conn(self, regions): - ''' - :param regions: A list of regions to create a boto3 client - - Generator that yields a boto3 client and the region - ''' - iam_role_arn = self.iam_role_arn - credentials = self.credentials - for region in regions: - try: - if iam_role_arn is not None: - assumed_credentials = self._boto3_assume_role(credentials, region) - else: - assumed_credentials = credentials - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - if self.boto_profile: - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - else: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - yield connection, region - - def _get_hosts_by_region(self, connection, filters, strict): - - def _add_tags_for_hosts(connection, hosts, strict): - for host in hosts: - if 'DBInstanceArn' in host: - resource_arn = host['DBInstanceArn'] - else: - resource_arn = host['DBClusterArn'] - - try: - tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList'] - except is_boto3_error_code('AccessDenied') as e: - if not strict: - tags = [] - else: - raise e - host['Tags'] = tags - - def wrapper(f, *args, **kwargs): - try: - results = f(*args, **kwargs) - if 'DBInstances' in results: - results = results['DBInstances'] - else: - results = results['DBClusters'] - _add_tags_for_hosts(connection, results, strict) - except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except - if not strict: - results = [] - else: - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) - return results - return wrapper - - def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False): - ''' - :param regions: a list of regions in which to describe hosts - :param instance_filters: a list of boto3 filter dictionaries - :param cluster_filters: a list of boto3 filter dictionaries - :param strict: a boolean determining whether to fail or ignore 403 error codes - :param 
statuses: a list of statuses that the returned hosts should match - :return A list of host dictionaries - ''' - all_instances = [] - all_clusters = [] - for connection, _region in self._boto3_conn(regions): - paginator = connection.get_paginator('describe_db_instances') - all_instances.extend( - self._get_hosts_by_region(connection, instance_filters, strict) - (paginator.paginate(Filters=instance_filters).build_full_result) - ) - if gather_clusters: - all_clusters.extend( - self._get_hosts_by_region(connection, cluster_filters, strict) - (connection.describe_db_clusters, **{'Filters': cluster_filters}) - ) - sorted_hosts = list( - sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) + - sorted(all_clusters, key=lambda x: x['DBClusterIdentifier']) - ) - return self.find_hosts_with_valid_statuses(sorted_hosts, statuses) + results = func(connection=connection, filters=filters) + if "DBInstances" in results: + results = results["DBInstances"] + else: + results = results["DBClusters"] + _add_tags_for_rds_hosts(connection, results, strict) + except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except + if not strict: + return [] + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") + + return results + + return describe_wrapper + + +@describe_resource_with_tags +def _describe_db_instances(connection, filters): + paginator = connection.get_paginator("describe_db_instances") + return paginator.paginate(Filters=filters).build_full_result() - def find_hosts_with_valid_statuses(self, hosts, statuses): - if 'all' in statuses: - return hosts - valid_hosts = [] - for host in hosts: - if host.get('DBInstanceStatus') in statuses: - valid_hosts.append(host) - elif host.get('Status') in statuses: - valid_hosts.append(host) - return valid_hosts + +@describe_resource_with_tags +def _describe_db_clusters(connection, filters): + return connection.describe_db_clusters(Filters=filters) + + +class InventoryModule(AWSInventoryBase): + NAME = "amazon.aws.aws_rds" + INVENTORY_FILE_SUFFIXES = ("aws_rds.yml", "aws_rds.yaml") + + def __init__(self): + super().__init__() + self.credentials = {} def _populate(self, hosts): - group = 'aws_rds' + group = "aws_rds" self.inventory.add_group(group) if hosts: self._add_hosts(hosts=hosts, group=group) - self.inventory.add_child('all', group) + self.inventory.add_child("all", group) def _populate_from_source(self, source_data): - hostvars = source_data.pop('_meta', {}).get('hostvars', {}) + hostvars = source_data.pop("_meta", {}).get("hostvars", {}) for group in source_data: - if group == 'all': + if group == "all": continue - else: - self.inventory.add_group(group) - hosts = source_data[group].get('hosts', []) - for host in hosts: - self._populate_host_vars([host], hostvars.get(host, {}), group) - self.inventory.add_child('all', group) - - def _get_hostname(self, host): - if host.get('DBInstanceIdentifier'): - return host['DBInstanceIdentifier'] - else: - return host['DBClusterIdentifier'] + self.inventory.add_group(group) + hosts = source_data[group].get("hosts", []) + for host in hosts: + self._populate_host_vars([host], hostvars.get(host, {}), group) + self.inventory.add_child("all", group) def _format_inventory(self, hosts): - results = {'_meta': {'hostvars': {}}} - group = 'aws_rds' - results[group] = {'hosts': []} + results = {"_meta": 
{"hostvars": {}}} + group = "aws_rds" + results[group] = {"hosts": []} for host in hosts: - hostname = self._get_hostname(host) - results[group]['hosts'].append(hostname) + hostname = _get_rds_hostname(host) + results[group]["hosts"].append(hostname) h = self.inventory.get_host(hostname) - results['_meta']['hostvars'][h.name] = h.vars + results["_meta"]["hostvars"][h.name] = h.vars return results def _add_hosts(self, hosts, group): - ''' - :param hosts: a list of hosts to be added to a group - :param group: the name of the group to which the hosts belong - ''' + """ + :param hosts: a list of hosts to be added to a group + :param group: the name of the group to which the hosts belong + """ for host in hosts: - hostname = self._get_hostname(host) - host = camel_dict_to_snake_dict(host, ignore_list=['Tags']) - host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', [])) + hostname = _get_rds_hostname(host) + host = camel_dict_to_snake_dict(host, ignore_list=["Tags"]) + host["tags"] = boto3_tag_list_to_ansible_dict(host.get("tags", [])) # Allow easier grouping by region - if 'availability_zone' in host: - host['region'] = host['availability_zone'][:-1] - elif 'availability_zones' in host: - host['region'] = host['availability_zones'][0][:-1] + if "availability_zone" in host: + host["region"] = host["availability_zone"][:-1] + elif "availability_zones" in host: + host["region"] = host["availability_zones"][0][:-1] self.inventory.add_host(hostname, group=group) hostvars_prefix = self.get_option("hostvars_prefix") @@ -308,96 +230,65 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): host.update(new_vars) # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") # Composed variables - self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), host, hostname, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host, hostname, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) - - def _set_credentials(self): - ''' - ''' - self.boto_profile = self.get_option('aws_profile') - aws_access_key_id = self.get_option('aws_access_key') - aws_secret_access_key = self.get_option('aws_secret_key') - aws_security_token = self.get_option('aws_security_token') - self.iam_role_arn = self.get_option('iam_role_arn') - - if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): - session = botocore.session.get_session() - if session.get_credentials() is not None: - aws_access_key_id = session.get_credentials().access_key - aws_secret_access_key = session.get_credentials().secret_key - aws_security_token = session.get_credentials().token - - if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): - raise AnsibleError("Insufficient boto credentials found. 
Please provide them in your " - "inventory configuration file or set them as environment variables.") - - if aws_access_key_id: - self.credentials['aws_access_key_id'] = aws_access_key_id - if aws_secret_access_key: - self.credentials['aws_secret_access_key'] = aws_secret_access_key - if aws_security_token: - self.credentials['aws_session_token'] = aws_security_token - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('aws_rds.yml', 'aws_rds.yaml')): - return True - return False + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host, hostname, strict=strict) - def parse(self, inventory, loader, path, cache=True): - super(InventoryModule, self).parse(inventory, loader, path) + def _get_all_db_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False): + """ + :param regions: a list of regions in which to describe hosts + :param instance_filters: a list of boto3 filter dictionaries + :param cluster_filters: a list of boto3 filter dictionaries + :param strict: a boolean determining whether to fail or ignore 403 error codes + :param statuses: a list of statuses that the returned hosts should match + :return A list of host dictionaries + """ + all_instances = [] + all_clusters = [] - if not HAS_BOTO3: - raise AnsibleError(missing_required_lib('botocore and boto3')) + for connection, _region in self.all_clients("rds"): + all_instances += _describe_db_instances(connection, instance_filters, strict=strict) + if gather_clusters: + all_clusters += _describe_db_clusters(connection, cluster_filters, strict=strict) + sorted_hosts = list( + sorted(all_instances, key=lambda x: x["DBInstanceIdentifier"]) + + sorted(all_clusters, key=lambda x: x["DBClusterIdentifier"]) + ) + return _find_hosts_with_valid_statuses(sorted_hosts, statuses) - self._read_config_data(path) - self._set_credentials() + def parse(self, inventory, loader, path, cache=True): + super().parse(inventory, loader, path, cache=cache) # get user specifications - regions = self.get_option('regions') - filters = self.get_option('filters') - strict_permissions = self.get_option('strict_permissions') - statuses = self.get_option('statuses') - include_clusters = self.get_option('include_clusters') + regions = self.get_option("regions") + filters = self.get_option("filters") + strict_permissions = self.get_option("strict_permissions") + statuses = self.get_option("statuses") + include_clusters = self.get_option("include_clusters") instance_filters = ansible_dict_to_boto3_filter_list(filters) cluster_filters = [] - if 'db-cluster-id' in filters and include_clusters: - cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']}) - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = self.get_option('cache') - - # Generate inventory - formatted_inventory = {} - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - else: - self._populate_from_source(results) - - if not cache or cache_needs_update: - results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters) - 
self._populate(results) - formatted_inventory = self._format_inventory(results) + if "db-cluster-id" in filters and include_clusters: + cluster_filters = ansible_dict_to_boto3_filter_list({"db-cluster-id": filters["db-cluster-id"]}) + + result_was_cached, cached_result = self.get_cached_result(path, cache) + if result_was_cached: + self._populate_from_source(cached_result) + return + + results = self._get_all_db_hosts( + regions, + instance_filters, + cluster_filters, + strict_permissions, + statuses, + include_clusters, + ) + self._populate(results) - # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = formatted_inventory + # Update the cache once we're done + formatted_inventory = self._format_inventory(results) + self.update_cached_result(path, cache, formatted_inventory) diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py index 415b76d75..180c40f8f 100644 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py @@ -1,16 +1,12 @@ +# -*- coding: utf-8 -*- + # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: aws_account_attribute author: - Sloane Hertel (@s-hertel) <shertel@redhat.com> -extends_documentation_fragment: - - amazon.aws.boto3 - - amazon.aws.aws_credentials - - amazon.aws.aws_region short_description: Look up AWS account attributes description: - Describes attributes of your AWS account. You can specify one of the listed @@ -26,9 +22,13 @@ options: - max-elastic-ips - vpc-max-elastic-ips - has-ec2-classic -''' +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" -EXAMPLES = """ +EXAMPLES = r""" vars: has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}" # true | false @@ -39,10 +39,9 @@ vars: account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}" # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'], # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']} - """ -RETURN = """ +RETURN = r""" _raw: description: Returns a boolean when I(attribute) is check_ec2_classic. 
Otherwise returns the value(s) of the attribute @@ -50,87 +49,50 @@ _raw: """ try: - import boto3 import botocore except ImportError: - pass # will be captured by imported HAS_BOTO3 + pass # Handled by AWSLookupBase from ansible.errors import AnsibleLookupError from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.lookup import LookupBase - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase -def _boto3_conn(region, credentials): - boto_profile = credentials.pop('aws_profile', None) - try: - connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - if boto_profile: - try: - connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - raise AnsibleLookupError("Insufficient credentials found.") - else: - raise AnsibleLookupError("Insufficient credentials found.") - return connection - - -def _get_credentials(options): - credentials = {} - credentials['aws_profile'] = options['aws_profile'] - credentials['aws_secret_access_key'] = options['aws_secret_key'] - credentials['aws_access_key_id'] = options['aws_access_key'] - if options['aws_security_token']: - credentials['aws_session_token'] = options['aws_security_token'] - - return credentials - - -@AWSRetry.jittered_backoff(retries=10) def _describe_account_attributes(client, **params): - return client.describe_account_attributes(**params) + return client.describe_account_attributes(aws_retry=True, **params) -class LookupModule(LookupBase): +class LookupModule(AWSLookupBase): def run(self, terms, variables, **kwargs): + super().run(terms, variables, **kwargs) - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - self.set_options(var_options=variables, direct=kwargs) - boto_credentials = _get_credentials(self._options) - - region = self._options['region'] - client = _boto3_conn(region, boto_credentials) + client = self.client("ec2", AWSRetry.jittered_backoff()) - attribute = kwargs.get('attribute') - params = {'AttributeNames': []} + attribute = kwargs.get("attribute") + params = {"AttributeNames": []} check_ec2_classic = False - if 'has-ec2-classic' == attribute: + if "has-ec2-classic" == attribute: check_ec2_classic = True - params['AttributeNames'] = ['supported-platforms'] + params["AttributeNames"] = ["supported-platforms"] elif attribute: - params['AttributeNames'] = [attribute] + params["AttributeNames"] = [attribute] try: - response = _describe_account_attributes(client, **params)['AccountAttributes'] + response = _describe_account_attributes(client, **params)["AccountAttributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to describe account attributes: %s" % to_native(e)) + raise AnsibleLookupError(f"Failed to describe account attributes: {to_native(e)}") if check_ec2_classic: attr = response[0] - return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues']) + return any(value["AttributeValue"] == "EC2" for 
value in attr["AttributeValues"]) if attribute: attr = response[0] - return [value['AttributeValue'] for value in attr['AttributeValues']] + return [value["AttributeValue"] for value in attr["AttributeValues"]] flattened = {} for k_v_dict in response: - flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']] + flattened[k_v_dict["AttributeName"]] = [value["AttributeValue"] for value in k_v_dict["AttributeValues"]] return flattened diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py new file mode 100644 index 000000000..35f05c94e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- + +# (c) 2023 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: aws_collection_constants +author: + - Mark Chappell (@tremble) +short_description: expose various collection related constants +version_added: 6.0.0 +description: + - Exposes various collection related constants for use in integration tests. +options: + _terms: + description: Name of the constant. + choices: + - MINIMUM_BOTOCORE_VERSION + - MINIMUM_BOTO3_VERSION + - HAS_BOTO3 + - AMAZON_AWS_COLLECTION_VERSION + - AMAZON_AWS_COLLECTION_NAME + - COMMUNITY_AWS_COLLECTION_VERSION + - COMMUNITY_AWS_COLLECTION_NAME + required: True +""" + +EXAMPLES = r""" +""" + +RETURN = r""" +_raw: + description: value + type: str +""" + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as botocore_utils +import ansible_collections.amazon.aws.plugins.module_utils.common as common_utils + +try: + import ansible_collections.community.aws.plugins.module_utils.common as community_utils + + HAS_COMMUNITY = True +except ImportError: + HAS_COMMUNITY = False + + +class LookupModule(LookupBase): + def lookup_constant(self, name): + if name == "MINIMUM_BOTOCORE_VERSION": + return botocore_utils.MINIMUM_BOTOCORE_VERSION + if name == "MINIMUM_BOTO3_VERSION": + return botocore_utils.MINIMUM_BOTO3_VERSION + if name == "HAS_BOTO3": + return botocore_utils.HAS_BOTO3 + + if name == "AMAZON_AWS_COLLECTION_VERSION": + return common_utils.AMAZON_AWS_COLLECTION_VERSION + if name == "AMAZON_AWS_COLLECTION_NAME": + return common_utils.AMAZON_AWS_COLLECTION_NAME + + if name == "COMMUNITY_AWS_COLLECTION_VERSION": + if not HAS_COMMUNITY: + raise AnsibleLookupError("Unable to load ansible_collections.community.aws.plugins.module_utils.common") + return community_utils.COMMUNITY_AWS_COLLECTION_VERSION + if name == "COMMUNITY_AWS_COLLECTION_NAME": + if not HAS_COMMUNITY: + raise AnsibleLookupError("Unable to load ansible_collections.community.aws.plugins.module_utils.common") + return community_utils.COMMUNITY_AWS_COLLECTION_NAME + + def run(self, terms, variables, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + if not terms: + raise AnsibleLookupError("Constant name not provided") + if len(terms) > 1: + raise AnsibleLookupError("Multiple constant names provided") + name = terms[0].upper() + + return [self.lookup_constant(name)] diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py deleted file mode 100644 index 0f694cfa0..000000000 --- 
a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' -name: aws_secret -author: - - Aaron Smith (!UNKNOWN) <ajsmith10381@gmail.com> -extends_documentation_fragment: - - amazon.aws.boto3 - - amazon.aws.aws_credentials - - amazon.aws.aws_region - -short_description: Look up secrets stored in AWS Secrets Manager -description: - - Look up secrets stored in AWS Secrets Manager provided the caller - has the appropriate permissions to read the secret. - - Lookup is based on the secret's I(Name) value. - - Optional parameters can be passed into this lookup; I(version_id) and I(version_stage) -options: - _terms: - description: Name of the secret to look up in AWS Secrets Manager. - required: True - bypath: - description: A boolean to indicate whether the parameter is provided as a hierarchy. - default: false - type: boolean - version_added: 1.4.0 - nested: - description: A boolean to indicate the secret contains nested values. - type: boolean - default: false - version_added: 1.4.0 - version_id: - description: Version of the secret(s). - required: False - version_stage: - description: Stage of the secret version. - required: False - join: - description: - - Join two or more entries to form an extended secret. - - This is useful for overcoming the 4096 character limit imposed by AWS. - - No effect when used with I(bypath). - type: boolean - default: false - on_deleted: - description: - - Action to take if the secret has been marked for deletion. - - C(error) will raise a fatal error when the secret has been marked for deletion. - - C(skip) will silently ignore the deleted secret. - - C(warn) will skip over the deleted secret but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - on_missing: - description: - - Action to take if the secret is missing. - - C(error) will raise a fatal error when the secret is missing. - - C(skip) will silently ignore the missing secret. - - C(warn) will skip over the missing secret but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - on_denied: - description: - - Action to take if access to the secret is denied. - - C(error) will raise a fatal error when access to the secret is denied. - - C(skip) will silently ignore the denied secret. - - C(warn) will skip over the denied secret but issue a warning. 
- default: error - type: string - choices: ['error', 'skip', 'warn'] -''' - -EXAMPLES = r""" - - name: lookup secretsmanager secret in the current region - debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}" - - - name: Create RDS instance with aws_secret lookup for password param - rds: - command: create - instance_name: app-db - db_engine: MySQL - size: 10 - instance_type: db.m1.small - username: dbadmin - password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}" - tags: - Environment: staging - - - name: skip if secret does not exist - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}" - - - name: warn if access to the secret is denied - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}" - - - name: lookup secretsmanager secret in the current region using the nested feature - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}" - # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. - # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. - - name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature - debug: > - msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, aws_profile=aws_profile, - aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, nested=true) }}" - # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. - # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. - # Region is the AWS region where the AWS secret is stored. - # AWS_profile is the aws profile to use, that has access to the AWS secret. -""" - -RETURN = r""" -_raw: - description: - Returns the value of the secret stored in AWS Secrets Manager. 
-""" - -import json - -try: - import boto3 - import botocore -except ImportError: - pass # will be captured by imported HAS_BOTO3 - -from ansible.errors import AnsibleLookupError -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.lookup import LookupBase - -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 - - -def _boto3_conn(region, credentials): - boto_profile = credentials.pop('aws_profile', None) - - try: - connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - if boto_profile: - try: - connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - raise AnsibleLookupError("Insufficient credentials found.") - else: - raise AnsibleLookupError("Insufficient credentials found.") - return connection - - -class LookupModule(LookupBase): - def run(self, terms, variables=None, boto_profile=None, aws_profile=None, - aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, - bypath=False, nested=False, join=False, version_stage=None, version_id=None, on_missing='error', - on_denied='error', on_deleted='error'): - ''' - :arg terms: a list of lookups to run. - e.g. ['parameter_name', 'parameter_name_too' ] - :kwarg variables: ansible variables active at the time of the lookup - :kwarg aws_secret_key: identity of the AWS key to use - :kwarg aws_access_key: AWS secret key (matching identity) - :kwarg aws_security_token: AWS session key if using STS - :kwarg decrypt: Set to True to get decrypted parameters - :kwarg region: AWS region in which to do the lookup - :kwarg bypath: Set to True to do a lookup of variables under a path - :kwarg nested: Set to True to do a lookup of nested secrets - :kwarg join: Join two or more entries to form an extended secret - :kwarg version_stage: Stage of the secret version - :kwarg version_id: Version of the secret(s) - :kwarg on_missing: Action to take if the secret is missing - :kwarg on_deleted: Action to take if the secret is marked for deletion - :kwarg on_denied: Action to take if access to the secret is denied - :returns: A list of parameter values or a list of dictionaries if bypath=True. 
- ''' - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - deleted = on_deleted.lower() - if not isinstance(deleted, string_types) or deleted not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_deleted" must be a string and one of "error", "warn" or "skip", not %s' % deleted) - - missing = on_missing.lower() - if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing) - - denied = on_denied.lower() - if not isinstance(denied, string_types) or denied not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % denied) - - credentials = {} - if aws_profile: - credentials['aws_profile'] = aws_profile - else: - credentials['aws_profile'] = boto_profile - credentials['aws_secret_access_key'] = aws_secret_key - credentials['aws_access_key_id'] = aws_access_key - credentials['aws_session_token'] = aws_security_token - - # fallback to IAM role credentials - if not credentials['aws_profile'] and not ( - credentials['aws_access_key_id'] and credentials['aws_secret_access_key']): - session = botocore.session.get_session() - if session.get_credentials() is not None: - credentials['aws_access_key_id'] = session.get_credentials().access_key - credentials['aws_secret_access_key'] = session.get_credentials().secret_key - credentials['aws_session_token'] = session.get_credentials().token - - client = _boto3_conn(region, credentials) - - if bypath: - secrets = {} - for term in terms: - try: - paginator = client.get_paginator('list_secrets') - paginator_response = paginator.paginate( - Filters=[{'Key': 'name', 'Values': [term]}]) - for object in paginator_response: - if 'SecretList' in object: - for secret_obj in object['SecretList']: - secrets.update({secret_obj['Name']: self.get_secret_value( - secret_obj['Name'], client, on_missing=missing, on_denied=denied)}) - secrets = [secrets] - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) - else: - secrets = [] - for term in terms: - value = self.get_secret_value(term, client, - version_stage=version_stage, version_id=version_id, - on_missing=missing, on_denied=denied, on_deleted=deleted, - nested=nested) - if value: - secrets.append(value) - if join: - joined_secret = [] - joined_secret.append(''.join(secrets)) - return joined_secret - - return secrets - - def get_secret_value(self, term, client, version_stage=None, version_id=None, on_missing=None, on_denied=None, on_deleted=None, nested=False): - params = {} - params['SecretId'] = term - if version_id: - params['VersionId'] = version_id - if version_stage: - params['VersionStage'] = version_stage - if nested: - if len(term.split('.')) < 2: - raise AnsibleLookupError("Nested query must use the following syntax: `aws_secret_name.<key_name>.<key_name>") - secret_name = term.split('.')[0] - params['SecretId'] = secret_name - - try: - response = client.get_secret_value(**params) - if 'SecretBinary' in response: - return response['SecretBinary'] - if 'SecretString' in response: - if nested: - query = term.split('.')[1:] - secret_string = json.loads(response['SecretString']) - ret_val = secret_string - for key in query: - if key in ret_val: - ret_val = ret_val[key] - else: - raise AnsibleLookupError("Successfully retrieved secret but 
there exists no key {0} in the secret".format(key)) - return str(ret_val) - else: - return response['SecretString'] - except is_boto3_error_message('marked for deletion'): - if on_deleted == 'error': - raise AnsibleLookupError("Failed to find secret %s (marked for deletion)" % term) - elif on_deleted == 'warn': - self._display.warning('Skipping, did not find secret (marked for deletion) %s' % term) - except is_boto3_error_code('ResourceNotFoundException'): # pylint: disable=duplicate-except - if on_missing == 'error': - raise AnsibleLookupError("Failed to find secret %s (ResourceNotFound)" % term) - elif on_missing == 'warn': - self._display.warning('Skipping, did not find secret %s' % term) - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - if on_denied == 'error': - raise AnsibleLookupError("Failed to access secret %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for secret %s' % term) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) - - return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py index 251debf40..c01f583f0 100644 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py @@ -1,10 +1,10 @@ +# -*- coding: utf-8 -*- + # (c) 2016 James Turner <turnerjsm@gmail.com> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: aws_service_ip_ranges author: - James Turner (!UNKNOWN) <turnerjsm@gmail.com> @@ -22,23 +22,22 @@ options: ipv6_prefixes: description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses' version_added: 2.1.0 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" vars: ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}" tasks: + - name: "use list return option and iterate as a loop" + debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" + # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " -- name: "use list return option and iterate as a loop" - debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" -# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " - -- name: "Pull S3 IP ranges, and print the default return style" - debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" -# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" + - name: "Pull S3 IP ranges, and print the default return style" + debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" + # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" """ -RETURN = """ +RETURN = r""" _raw: description: comma-separated list of CIDR ranges """ @@ -46,12 +45,12 @@ _raw: import json from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.six.moves.urllib.error import URLError -from 
ansible.module_utils._text import to_native from ansible.module_utils.urls import ConnectionError -from ansible.module_utils.urls import open_url from ansible.module_utils.urls import SSLValidationError +from ansible.module_utils.urls import open_url from ansible.plugins.lookup import LookupBase @@ -65,26 +64,26 @@ class LookupModule(LookupBase): ip_prefix_label = "ip_prefix" try: - resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') + resp = open_url("https://ip-ranges.amazonaws.com/ip-ranges.json") amazon_response = json.load(resp)[prefixes_label] - except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: + except getattr(json.decoder, "JSONDecodeError", ValueError) as e: # on Python 3+, json.decoder.JSONDecodeError is raised for bad # JSON. On 2.x it's a ValueError - raise AnsibleLookupError("Could not decode AWS IP ranges: %s" % to_native(e)) + raise AnsibleLookupError(f"Could not decode AWS IP ranges: {to_native(e)}") except HTTPError as e: - raise AnsibleLookupError("Received HTTP error while pulling IP ranges: %s" % to_native(e)) + raise AnsibleLookupError(f"Received HTTP error while pulling IP ranges: {to_native(e)}") except SSLValidationError as e: - raise AnsibleLookupError("Error validating the server's certificate for: %s" % to_native(e)) + raise AnsibleLookupError(f"Error validating the server's certificate: {to_native(e)}") except URLError as e: - raise AnsibleLookupError("Failed look up IP range service: %s" % to_native(e)) + raise AnsibleLookupError(f"Failed to look up IP range service: {to_native(e)}") except ConnectionError as e: - raise AnsibleLookupError("Error connecting to IP range service: %s" % to_native(e)) + raise AnsibleLookupError(f"Error connecting to IP range service: {to_native(e)}") - if 'region' in kwargs: - region = kwargs['region'] - amazon_response = (item for item in amazon_response if item['region'] == region) - if 'service' in kwargs: - service = str.upper(kwargs['service']) - amazon_response = (item for item in amazon_response if item['service'] == service) + if "region" in kwargs: + region = kwargs["region"] + amazon_response = (item for item in amazon_response if item["region"] == region) + if "service" in kwargs: + service = str.upper(kwargs["service"]) + amazon_response = (item for item in amazon_response if item["service"] == service) iprange = [item[ip_prefix_label] for item in amazon_response] return iprange diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py deleted file mode 100644 index e71808560..000000000 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py +++ /dev/null @@ -1,286 +0,0 @@ -# (c) 2016, Bill Wang <ozbillwang(at)gmail.com> -# (c) 2017, Marat Bakeev <hawara(at)gmail.com> -# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com> -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' -name: aws_ssm -author: - - Bill Wang (!UNKNOWN) <ozbillwang(at)gmail.com> - - Marat Bakeev (!UNKNOWN) <hawara(at)gmail.com> - - Michael De La Rue (!UNKNOWN) <siblemitcom.mddlr@spamgourmet.com> -short_description: Get the value for a SSM parameter or all parameters under a path -description: - - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
- The first argument you pass the lookup can either be a parameter name or a hierarchy of - parameters. Hierarchies start with a forward slash and end with the parameter name. Up to - 5 layers may be specified. - - If looking up an explicitly listed parameter by name which does not exist then the lookup - will generate an error. You can use the ```default``` filter to give a default value in - this case but must set the ```on_missing``` parameter to ```skip``` or ```warn```. You must - also set the second parameter of the ```default``` filter to ```true``` (see examples below). - - When looking up a path for parameters under it a dictionary will be returned for each path. - If there is no parameter under that path then the lookup will generate an error. - - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm - will generate an error. If you want to continue in this case then you will have to set up - two ansible tasks, one which sets a variable and ignores failures and one which uses the value - of that variable with a default. See the examples below. - -options: - decrypt: - description: A boolean to indicate whether to decrypt the parameter. - default: true - type: boolean - bypath: - description: A boolean to indicate whether the parameter is provided as a hierarchy. - default: false - type: boolean - recursive: - description: A boolean to indicate whether to retrieve all parameters within a hierarchy. - default: false - type: boolean - shortnames: - description: Indicates whether to return the name only without path if using a parameter hierarchy. - default: false - type: boolean - on_missing: - description: - - Action to take if the SSM parameter is missing. - - C(error) will raise a fatal error when the SSM parameter is missing. - - C(skip) will silently ignore the missing SSM parameter. - - C(warn) will skip over the missing SSM parameter but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - on_denied: - description: - - Action to take if access to the SSM parameter is denied. - - C(error) will raise a fatal error when access to the SSM parameter is denied. - - C(skip) will silently ignore the denied SSM parameter. - - C(warn) will skip over the denied SSM parameter but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - endpoint: - description: Use a custom endpoint when connecting to SSM service. 
- type: string - version_added: 3.3.0 -extends_documentation_fragment: - - amazon.aws.boto3 -''' - -EXAMPLES = ''' -# lookup sample: -- name: lookup ssm parameter store in the current region - debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}" - -- name: lookup ssm parameter store in specified region - debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}" - -- name: lookup ssm parameter store without decryption - debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}" - -- name: lookup ssm parameter store using a specified aws profile - debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}" - -- name: lookup ssm parameter store using explicit aws credentials - debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}" - -- name: lookup ssm parameter store with all options - debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}" - -- name: lookup ssm parameter and fail if missing - debug: msg="{{ lookup('aws_ssm', 'missing-parameter') }}" - -- name: lookup a key which doesn't exist, returning a default ('root') - debug: msg="{{ lookup('aws_ssm', 'AdminID', on_missing="skip") | default('root', true) }}" - -- name: lookup a key which doesn't exist failing to store it in a fact - set_fact: - temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}" - ignore_errors: true - -- name: show fact default to "access failed" if we don't have access - debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" - -- name: return a dictionary of ssm parameters from a hierarchy path - debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" - -- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) - debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" - -- name: Iterate over a parameter hierarchy (one iteration per parameter) - debug: msg='Key contains {{ item.key }} , with value {{ item.value }}' - loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}' - -- name: Iterate over multiple paths as dictionaries (one iteration per path) - debug: msg='Path contains {{ item }}' - loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}' - -- name: lookup ssm parameter warn if access is denied - debug: msg="{{ lookup('aws_ssm', 'missing-parameter', on_denied="warn" ) }}" -''' - -try: - import botocore -except ImportError: - pass # will be captured by imported HAS_BOTO3 - -from ansible.errors import AnsibleLookupError -from ansible.module_utils._text import to_native -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display -from ansible.module_utils.six import string_types -from ansible.module_utils.basic import missing_required_lib - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - -display = Display() - - -class LookupModule(LookupBase): - def 
run(self, terms, variables=None, boto_profile=None, aws_profile=None, - aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, - bypath=False, shortnames=False, recursive=False, decrypt=True, on_missing="error", - on_denied="error", endpoint=None): - ''' - :arg terms: a list of lookups to run. - e.g. ['parameter_name', 'parameter_name_too' ] - :kwarg variables: ansible variables active at the time of the lookup - :kwarg aws_secret_key: identity of the AWS key to use - :kwarg aws_access_key: AWS secret key (matching identity) - :kwarg aws_security_token: AWS session key if using STS - :kwarg decrypt: Set to True to get decrypted parameters - :kwarg region: AWS region in which to do the lookup - :kwarg bypath: Set to True to do a lookup of variables under a path - :kwarg recursive: Set to True to recurse below the path (requires bypath=True) - :kwarg on_missing: Action to take if the SSM parameter is missing - :kwarg on_denied: Action to take if access to the SSM parameter is denied - :kwarg endpoint: Endpoint for SSM client - :returns: A list of parameter values or a list of dictionaries if bypath=True. - ''' - - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - # validate arguments 'on_missing' and 'on_denied' - if on_missing is not None and (not isinstance(on_missing, string_types) or on_missing.lower() not in ['error', 'warn', 'skip']): - raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % on_missing) - if on_denied is not None and (not isinstance(on_denied, string_types) or on_denied.lower() not in ['error', 'warn', 'skip']): - raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % on_denied) - - ret = [] - ssm_dict = {} - - self.params = variables - - cli_region, cli_endpoint, cli_boto_params = get_aws_connection_info(self, boto3=True) - - if region: - cli_region = region - - if endpoint: - cli_endpoint = endpoint - - # For backward compatibility - if aws_access_key: - cli_boto_params.update({'aws_access_key_id': aws_access_key}) - if aws_secret_key: - cli_boto_params.update({'aws_secret_access_key': aws_secret_key}) - if aws_security_token: - cli_boto_params.update({'aws_session_token': aws_security_token}) - if boto_profile: - cli_boto_params.update({'profile_name': boto_profile}) - if aws_profile: - cli_boto_params.update({'profile_name': aws_profile}) - - cli_boto_params.update(dict( - conn_type='client', - resource='ssm', - region=cli_region, - endpoint=cli_endpoint, - )) - - client = boto3_conn(module=self, **cli_boto_params) - - ssm_dict['WithDecryption'] = decrypt - - # Lookup by path - if bypath: - ssm_dict['Recursive'] = recursive - for term in terms: - display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region)) - - paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) - # Shorten parameter names. Yes, this will return - # duplicate names with different values. - if shortnames: - for x in paramlist: - x['Name'] = x['Name'][x['Name'].rfind('/') + 1:] - - display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist)) - - ret.append(boto3_tag_list_to_ansible_dict(paramlist, - tag_name_key_name="Name", - tag_value_key_name="Value")) - # Lookup by parameter name - always returns a list with one or - # no entry. 
- else: - display.vvv("AWS_ssm name lookup term: %s" % terms) - for term in terms: - ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) - display.vvvv("AWS_ssm path lookup returning: %s " % str(ret)) - return ret - - def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): - ssm_dict["Path"] = term - paginator = client.get_paginator('get_parameters_by_path') - try: - paramlist = paginator.paginate(**ssm_dict).build_full_result()['Parameters'] - except is_boto3_error_code('AccessDeniedException'): - if on_denied == 'error': - raise AnsibleLookupError("Failed to access SSM parameter path %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for SSM parameter path %s' % term) - paramlist = [{}] - elif on_denied == 'skip': - paramlist = [{}] - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) - - if not len(paramlist): - if on_missing == "error": - raise AnsibleLookupError("Failed to find SSM parameter path %s (ResourceNotFound)" % term) - elif on_missing == "warn": - self._display.warning('Skipping, did not find SSM parameter path %s' % term) - - return paramlist - - def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): - ssm_dict["Name"] = term - try: - response = client.get_parameter(**ssm_dict) - return response['Parameter']['Value'] - except is_boto3_error_code('ParameterNotFound'): - if on_missing == 'error': - raise AnsibleLookupError("Failed to find SSM parameter %s (ResourceNotFound)" % term) - elif on_missing == 'warn': - self._display.warning('Skipping, did not find SSM parameter %s' % term) - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - if on_denied == 'error': - raise AnsibleLookupError("Failed to access SSM parameter %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for SSM parameter %s' % term) - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) - return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py new file mode 100644 index 000000000..06ad10be5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: secretsmanager_secret +author: + - Aaron Smith (!UNKNOWN) <ajsmith10381@gmail.com> + +short_description: Look up secrets stored in AWS Secrets Manager +description: + - Look up secrets stored in AWS Secrets Manager provided the caller + has the appropriate permissions to read the secret. + - Lookup is based on the secret's I(Name) value. + - Optional parameters can be passed into this lookup: I(version_id) and I(version_stage). + - Prior to release 6.0.0 this plugin was known as C(aws_secret); the usage remains the same. + +options: + _terms: + description: Name of the secret to look up in AWS Secrets Manager. + required: True + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy.
+ default: false + type: boolean + version_added: 1.4.0 + nested: + description: A boolean to indicate the secret contains nested values. + type: boolean + default: false + version_added: 1.4.0 + version_id: + description: Version of the secret(s). + required: False + version_stage: + description: Stage of the secret version. + required: False + join: + description: + - Join two or more entries to form an extended secret. + - This is useful for overcoming the 4096 character limit imposed by AWS. + - No effect when used with I(bypath). + type: boolean + default: false + on_deleted: + description: + - Action to take if the secret has been marked for deletion. + - C(error) will raise a fatal error when the secret has been marked for deletion. + - C(skip) will silently ignore the deleted secret. + - C(warn) will skip over the deleted secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_missing: + description: + - Action to take if the secret is missing. + - C(error) will raise a fatal error when the secret is missing. + - C(skip) will silently ignore the missing secret. + - C(warn) will skip over the missing secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + on_denied: + description: + - Action to take if access to the secret is denied. + - C(error) will raise a fatal error when access to the secret is denied. + - C(skip) will silently ignore the denied secret. + - C(warn) will skip over the denied secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" + +EXAMPLES = r""" +- name: lookup secretsmanager secret in the current region + debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}" + +- name: Create RDS instance with aws_secret lookup for password param + rds: + command: create + instance_name: app-db + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: dbadmin + password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}" + tags: + Environment: staging + +- name: skip if secret does not exist + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}" + +- name: warn if access to the secret is denied + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}" + +- name: lookup secretsmanager secret in the current region using the nested feature + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. +- name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature + debug: > + msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, profile=aws_profile, + access_key=aws_access_key, secret_key=aws_secret_key, nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. + # region is the AWS region where the secret is stored. + # profile is the AWS profile to use; it must have access to the secret.
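+ +# Further illustrative examples for the version_id, version_stage and join options documented above; +# the secret names and the version id shown here are placeholders, not values from this collection. +- name: lookup a specific version of a secret by version id + debug: msg="{{ lookup('amazon.aws.secretsmanager_secret', 'DbSecret', version_id='EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE') }}" + +- name: lookup the previous value of a secret using the AWSPREVIOUS staging label + debug: msg="{{ lookup('amazon.aws.secretsmanager_secret', 'DbSecret', version_stage='AWSPREVIOUS') }}" + +- name: join two secret entries to work around the 4096 character limit + debug: msg="{{ lookup('amazon.aws.secretsmanager_secret', 'secret-part-1', 'secret-part-2', join=true) }}"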
+""" + +RETURN = r""" +_raw: + description: + Returns the value of the secret stored in AWS Secrets Manager. +""" + +import json + +try: + import botocore +except ImportError: + pass # Handled by AWSLookupBase + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.module_utils.six import string_types + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase + + +def _list_secrets(client, term): + paginator = client.get_paginator("list_secrets") + return paginator.paginate(Filters=[{"Key": "name", "Values": [term]}]) + + +class LookupModule(AWSLookupBase): + def run(self, terms, variables, **kwargs): + """ + :arg terms: a list of lookups to run. + e.g. ['example_secret_name', 'example_secret_too' ] + :variables: ansible variables active at the time of the lookup + :returns: A list of parameter values or a list of dictionaries if bypath=True. + """ + + super().run(terms, variables, **kwargs) + + on_missing = self.get_option("on_missing") + on_denied = self.get_option("on_denied") + on_deleted = self.get_option("on_deleted") + + # validate arguments 'on_missing' and 'on_denied' + if on_missing is not None and ( + not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' + ) + if on_denied is not None and ( + not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_denied" must be a string and one of "error", "warn" or "skip", not {on_denied}' + ) + if on_deleted is not None and ( + not isinstance(on_deleted, string_types) or on_deleted.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_deleted" must be a string and one of "error", "warn" or "skip", not {on_deleted}' + ) + + client = self.client("secretsmanager", AWSRetry.jittered_backoff()) + + if self.get_option("bypath"): + secrets = {} + for term in terms: + try: + for object in _list_secrets(client, term): + if "SecretList" in object: + for secret_obj in object["SecretList"]: + secrets.update( + { + secret_obj["Name"]: self.get_secret_value( + secret_obj["Name"], client, on_missing=on_missing, on_denied=on_denied + ) + } + ) + secrets = [secrets] + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") + else: + secrets = [] + for term in terms: + value = self.get_secret_value( + term, + client, + version_stage=self.get_option("version_stage"), + version_id=self.get_option("version_id"), + on_missing=on_missing, + on_denied=on_denied, + on_deleted=on_deleted, + nested=self.get_option("nested"), + ) + if value: + secrets.append(value) + if self.get_option("join"): + joined_secret = [] + joined_secret.append("".join(secrets)) + return joined_secret + + return secrets + + def get_secret_value( + self, + term, + client, + version_stage=None, + version_id=None, + on_missing=None, + on_denied=None, + on_deleted=None, + nested=False, + ): + params = {} + params["SecretId"] = term + if version_id: + params["VersionId"] = 
version_id + if version_stage: + params["VersionStage"] = version_stage + if nested: + if len(term.split(".")) < 2: + raise AnsibleLookupError( + "Nested query must use the following syntax: `aws_secret_name.<key_name>.<key_name>`" + ) + secret_name = term.split(".")[0] + params["SecretId"] = secret_name + + try: + response = client.get_secret_value(aws_retry=True, **params) + if "SecretBinary" in response: + return response["SecretBinary"] + if "SecretString" in response: + if nested: + query = term.split(".")[1:] + path = None + secret_string = json.loads(response["SecretString"]) + ret_val = secret_string + while query: + key = query.pop(0) + path = key if not path else path + "." + key + if key in ret_val: + ret_val = ret_val[key] + elif on_missing == "warn": + self._display.warning( + f"Skipping, successfully retrieved secret but there exists no key {path} in the secret" + ) + return None + elif on_missing == "error": + raise AnsibleLookupError( + f"Successfully retrieved secret but there exists no key {path} in the secret" + ) + return str(ret_val) + else: + return response["SecretString"] + except is_boto3_error_message("marked for deletion"): + if on_deleted == "error": + raise AnsibleLookupError(f"Failed to find secret {term} (marked for deletion)") + elif on_deleted == "warn": + self._display.warning(f"Skipping, did not find secret (marked for deletion) {term}") + except is_boto3_error_code("ResourceNotFoundException"): # pylint: disable=duplicate-except + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find secret {term} (ResourceNotFound)") + elif on_missing == "warn": + self._display.warning(f"Skipping, did not find secret {term}") + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access secret {term} (AccessDenied)") + elif on_denied == "warn": + self._display.warning(f"Skipping, access denied for secret {term}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") + + return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py b/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py new file mode 100644 index 000000000..0ca3afdd8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- + +# (c) 2016, Bill Wang <ozbillwang(at)gmail.com> +# (c) 2017, Marat Bakeev <hawara(at)gmail.com> +# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com> +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: ssm_parameter +author: + - Bill Wang (!UNKNOWN) <ozbillwang(at)gmail.com> + - Marat Bakeev (!UNKNOWN) <hawara(at)gmail.com> + - Michael De La Rue (!UNKNOWN) <siblemitcom.mddlr@spamgourmet.com> +short_description: gets the value for an SSM parameter or all parameters under a path +description: + - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters. + The first argument you pass to the lookup can either be a parameter name or a hierarchy of + parameters. Hierarchies start with a forward slash and end with the parameter name. Up to + 5 layers may be specified. + - If looking up an explicitly listed parameter by name that does not exist then the lookup + will generate an error.
You can use the C(default) filter to give a default value in + this case but must set the I(on_missing) parameter to C(skip) or C(warn). You must + also set the second parameter of the C(default) filter to C(true) (see examples below). + - When looking up parameters under a path, a dictionary will be returned for each path. + If there is no parameter under that path then the lookup will generate an error. + - If the lookup fails due to lack of permissions or due to an AWS client error then the lookup + will generate an error. If you want to continue in this case then you will have to set up + two Ansible tasks: one which sets a variable and ignores failures, and one which uses the value + of that variable with a default. See the examples below. + - Prior to release 6.0.0 this plugin was known as C(aws_ssm); the usage remains the same. + +options: + decrypt: + description: A boolean to indicate whether to decrypt the parameter. + default: true + type: boolean + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy. + default: false + type: boolean + recursive: + description: A boolean to indicate whether to retrieve all parameters within a hierarchy. + default: false + type: boolean + shortnames: + description: Indicates whether to return the name only without path if using a parameter hierarchy. + default: false + type: boolean + on_missing: + description: + - Action to take if the SSM parameter is missing. + - C(error) will raise a fatal error when the SSM parameter is missing. + - C(skip) will silently ignore the missing SSM parameter. + - C(warn) will skip over the missing SSM parameter but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_denied: + description: + - Action to take if access to the SSM parameter is denied. + - C(error) will raise a fatal error when access to the SSM parameter is denied. + - C(skip) will silently ignore the denied SSM parameter. + - C(warn) will skip over the denied SSM parameter but issue a warning.
+ default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" + +EXAMPLES = r""" +# lookup sample: +- name: lookup ssm parameter store in the current region + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello' ) }}" + +- name: lookup ssm parameter store in specified region + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', region='us-east-2' ) }}" + +- name: lookup ssm parameter store without decryption + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', decrypt=False ) }}" + +- name: lookup ssm parameter store using a specified aws profile + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', profile='myprofile' ) }}" + +- name: lookup ssm parameter store using explicit aws credentials + debug: + msg: >- + {{ lookup('amazon.aws.aws_ssm', 'Hello', access_key=my_aws_access_key, secret_key=my_aws_secret_key, session_token=my_session_token ) }} + +- name: lookup ssm parameter store with all options + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', decrypt=false, region='us-east-2', profile='myprofile') }}" + +- name: lookup ssm parameter and fail if missing + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'missing-parameter') }}" + +- name: lookup a key which doesn't exist, returning a default ('root') + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'AdminID', on_missing='skip') | default('root', true) }}" + +- name: lookup a key which doesn't exist failing to store it in a fact + set_fact: + temp_secret: "{{ lookup('amazon.aws.aws_ssm', '/NoAccess/hiddensecret') }}" + ignore_errors: true + +- name: show fact default to "access failed" if we don't have access + debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" + +- name: return a dictionary of ssm parameters from a hierarchy path + debug: msg="{{ lookup('amazon.aws.aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" + +- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) + debug: msg="{{ lookup('amazon.aws.aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" + +- name: Iterate over a parameter hierarchy (one iteration per parameter) + debug: msg='Key contains {{ item.key }} , with value {{ item.value }}' + loop: "{{ lookup('amazon.aws.aws_ssm', '/demo/', region='ap-southeast-2', bypath=True) | dict2items }}" + +- name: Iterate over multiple paths as dictionaries (one iteration per path) + debug: msg='Path contains {{ item }}' + loop: "{{ lookup('amazon.aws.aws_ssm', '/demo/', '/demo1/', bypath=True)}}" + +- name: lookup ssm parameter warn if access is denied + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'missing-parameter', on_denied='warn' ) }}" +""" + +try: + import botocore +except ImportError: + pass # Handled by AWSLookupBase + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.module_utils.six import string_types +from ansible.utils.display import Display + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase + +display = 
Display() + + +class LookupModule(AWSLookupBase): + def run(self, terms, variables, **kwargs): + """ + :arg terms: a list of lookups to run. + e.g. ['parameter_name', 'parameter_name_too' ] + :kwarg variables: ansible variables active at the time of the lookup + :returns: A list of parameter values or a list of dictionaries if bypath=True. + """ + + super().run(terms, variables, **kwargs) + + on_missing = self.get_option("on_missing") + on_denied = self.get_option("on_denied") + + # validate arguments 'on_missing' and 'on_denied' + if on_missing is not None and ( + not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' + ) + if on_denied is not None and ( + not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_denied" must be a string and one of "error", "warn" or "skip", not {on_denied}' + ) + + ret = [] + ssm_dict = {} + + client = self.client("ssm", AWSRetry.jittered_backoff()) + + ssm_dict["WithDecryption"] = self.get_option("decrypt") + + # Lookup by path + if self.get_option("bypath"): + ssm_dict["Recursive"] = self.get_option("recursive") + for term in terms: + display.vvv(f"AWS_ssm path lookup term: {term} in region: {self.region}") + + paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) + # Shorten parameter names. Yes, this will return + # duplicate names with different values. + if self.get_option("shortnames"): + for x in paramlist: + x["Name"] = x["Name"][x["Name"].rfind("/") + 1:] # fmt: skip + + display.vvvv(f"AWS_ssm path lookup returned: {to_native(paramlist)}") + + ret.append( + boto3_tag_list_to_ansible_dict(paramlist, tag_name_key_name="Name", tag_value_key_name="Value") + ) + # Lookup by parameter name - always returns a list with one or + # no entry. 
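+ # Note: get_parameter_value() returns None for parameters skipped via on_missing/on_denied, so the returned list may contain None entries.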
+ else: + display.vvv(f"AWS_ssm name lookup term: {terms}") + for term in terms: + ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) + display.vvvv(f"AWS_ssm path lookup returning: {to_native(ret)} ") + return ret + + def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Path"] = term + paginator = client.get_paginator("get_parameters_by_path") + try: + paramlist = paginator.paginate(**ssm_dict).build_full_result()["Parameters"] + except is_boto3_error_code("AccessDeniedException"): + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access SSM parameter path {term} (AccessDenied)") + elif on_denied == "warn": + self.warn(f"Skipping, access denied for SSM parameter path {term}") + paramlist = [{}] + elif on_denied == "skip": + paramlist = [{}] + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") + + if not len(paramlist): + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find SSM parameter path {term} (ResourceNotFound)") + elif on_missing == "warn": + self.warn(f"Skipping, did not find SSM parameter path {term}") + + return paramlist + + def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Name"] = term + try: + response = client.get_parameter(aws_retry=True, **ssm_dict) + return response["Parameter"]["Value"] + except is_boto3_error_code("ParameterNotFound"): + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find SSM parameter {term} (ResourceNotFound)") + elif on_missing == "warn": + self.warn(f"Skipping, did not find SSM parameter {term}") + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access SSM parameter {term} (AccessDenied)") + elif on_denied == "warn": + self.warn(f"Skipping, access denied for SSM parameter {term}") + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") + return None diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py deleted file mode 100644 index d91cf3ab4..000000000 --- a/ansible_collections/amazon/aws/plugins/module_utils/_version.py +++ /dev/null @@ -1,344 +0,0 @@ -# Vendored copy of distutils/version.py from CPython 3.9.5 -# -# Implements multiple version numbering conventions for the -# Python Module Distribution Utilities. -# -# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0) -# - -"""Provides classes to represent module version numbers (one class for -each style of version numbering). There are currently two such classes -implemented: StrictVersion and LooseVersion. - -Every version number class implements the following interface: - * the 'parse' method takes a string and parses it to some internal - representation; if the string is an invalid version number, - 'parse' raises a ValueError exception - * the class constructor takes an optional string argument which, - if supplied, is passed to 'parse' - * __str__ reconstructs the string that was passed to 'parse' (or - an equivalent string -- ie. 
one that will generate an equivalent - version number instance) - * __repr__ generates Python code to recreate the version number instance - * _cmp compares the current instance with either another instance - of the same class or a string (which will be parsed to an instance - of the same class, thus must follow the same rules) -""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import re - -try: - RE_FLAGS = re.VERBOSE | re.ASCII -except AttributeError: - RE_FLAGS = re.VERBOSE - - -class Version: - """Abstract base class for version numbering classes. Just provides - constructor (__init__) and reproducer (__repr__), because those - seem to be the same for all version numbering classes; and route - rich comparisons to _cmp. - """ - - def __init__(self, vstring=None): - if vstring: - self.parse(vstring) - - def __repr__(self): - return "%s ('%s')" % (self.__class__.__name__, str(self)) - - def __eq__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c == 0 - - def __lt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c < 0 - - def __le__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c <= 0 - - def __gt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c > 0 - - def __ge__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c >= 0 - - -# Interface for version-number classes -- must be implemented -# by the following classes (the concrete ones -- Version should -# be treated as an abstract class). -# __init__ (string) - create and take same action as 'parse' -# (string parameter is optional) -# parse (string) - convert a string representation to whatever -# internal representation is appropriate for -# this style of version numbering -# __str__ (self) - convert back to a string; should be very similar -# (if not identical to) the string supplied to parse -# __repr__ (self) - generate Python code to recreate -# the instance -# _cmp (self, other) - compare two version numbers ('other' may -# be an unparsed version string, or another -# instance of your version class) - - -class StrictVersion(Version): - """Version numbering for anal retentives and software idealists. - Implements the standard interface for version number classes as - described above. A version number consists of two or three - dot-separated numeric components, with an optional "pre-release" tag - on the end. The pre-release tag consists of the letter 'a' or 'b' - followed by a number. If the numeric components of two version - numbers are equal, then one with a pre-release tag will always - be deemed earlier (lesser) than one without. - - The following are valid version numbers (shown in the order that - would be obtained by sorting according to the supplied cmp function): - - 0.4 0.4.0 (these two are equivalent) - 0.4.1 - 0.5a1 - 0.5b3 - 0.5 - 0.9.6 - 1.0 - 1.0.4a3 - 1.0.4b1 - 1.0.4 - - The following are examples of invalid version numbers: - - 1 - 2.7.2.2 - 1.3.a4 - 1.3pl1 - 1.3c4 - - The rationale for this version numbering system will be explained - in the distutils documentation. - """ - - version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? 
([ab](\d+))?$", RE_FLAGS) - - def parse(self, vstring): - match = self.version_re.match(vstring) - if not match: - raise ValueError("invalid version number '%s'" % vstring) - - (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6) - - if patch: - self.version = tuple(map(int, [major, minor, patch])) - else: - self.version = tuple(map(int, [major, minor])) + (0,) - - if prerelease: - self.prerelease = (prerelease[0], int(prerelease_num)) - else: - self.prerelease = None - - def __str__(self): - if self.version[2] == 0: - vstring = ".".join(map(str, self.version[0:2])) - else: - vstring = ".".join(map(str, self.version)) - - if self.prerelease: - vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) - - return vstring - - def _cmp(self, other): - if isinstance(other, str): - other = StrictVersion(other) - elif not isinstance(other, StrictVersion): - return NotImplemented - - if self.version != other.version: - # numeric versions don't match - # prerelease stuff doesn't matter - if self.version < other.version: - return -1 - else: - return 1 - - # have to compare prerelease - # case 1: neither has prerelease; they're equal - # case 2: self has prerelease, other doesn't; other is greater - # case 3: self doesn't have prerelease, other does: self is greater - # case 4: both have prerelease: must compare them! - - if not self.prerelease and not other.prerelease: - return 0 - elif self.prerelease and not other.prerelease: - return -1 - elif not self.prerelease and other.prerelease: - return 1 - elif self.prerelease and other.prerelease: - if self.prerelease == other.prerelease: - return 0 - elif self.prerelease < other.prerelease: - return -1 - else: - return 1 - else: - raise AssertionError("never get here") - - -# end class StrictVersion - -# The rules according to Greg Stein: -# 1) a version number has 1 or more numbers separated by a period or by -# sequences of letters. If only periods, then these are compared -# left-to-right to determine an ordering. -# 2) sequences of letters are part of the tuple for comparison and are -# compared lexicographically -# 3) recognize the numeric components may have leading zeroes -# -# The LooseVersion class below implements these rules: a version number -# string is split up into a tuple of integer and string components, and -# comparison is a simple tuple comparison. This means that version -# numbers behave in a predictable and obvious way, but a way that might -# not necessarily be how people *want* version numbers to behave. There -# wouldn't be a problem if people could stick to purely numeric version -# numbers: just split on period and compare the numbers as tuples. -# However, people insist on putting letters into their version numbers; -# the most common purpose seems to be: -# - indicating a "pre-release" version -# ('alpha', 'beta', 'a', 'b', 'pre', 'p') -# - indicating a post-release patch ('p', 'pl', 'patch') -# but of course this can't cover all version number schemes, and there's -# no way to know what a programmer means without asking him. -# -# The problem is what to do with letters (and other non-numeric -# characters) in a version number. The current implementation does the -# obvious and predictable thing: keep them as strings and compare -# lexically within a tuple comparison. This has the desired effect if -# an appended letter sequence implies something "post-release": -# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". 
-# -# However, if letters in a version number imply a pre-release version, -# the "obvious" thing isn't correct. Eg. you would expect that -# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison -# implemented here, this just isn't so. -# -# Two possible solutions come to mind. The first is to tie the -# comparison algorithm to a particular set of semantic rules, as has -# been done in the StrictVersion class above. This works great as long -# as everyone can go along with bondage and discipline. Hopefully a -# (large) subset of Python module programmers will agree that the -# particular flavour of bondage and discipline provided by StrictVersion -# provides enough benefit to be worth using, and will submit their -# version numbering scheme to its domination. The free-thinking -# anarchists in the lot will never give in, though, and something needs -# to be done to accommodate them. -# -# Perhaps a "moderately strict" version class could be implemented that -# lets almost anything slide (syntactically), and makes some heuristic -# assumptions about non-digits in version number strings. This could -# sink into special-case-hell, though; if I was as talented and -# idiosyncratic as Larry Wall, I'd go ahead and implement a class that -# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is -# just as happy dealing with things like "2g6" and "1.13++". I don't -# think I'm smart enough to do it right though. -# -# In any case, I've coded the test suite for this module (see -# ../test/test_version.py) specifically to fail on things like comparing -# "1.2a2" and "1.2". That's not because the *code* is doing anything -# wrong, it's because the simple, obvious design doesn't match my -# complicated, hairy expectations for real-world version numbers. It -# would be a snap to fix the test suite to say, "Yep, LooseVersion does -# the Right Thing" (ie. the code matches the conception). But I'd rather -# have a conception that matches common notions about version numbers. - - -class LooseVersion(Version): - """Version numbering for anarchists and software realists. - Implements the standard interface for version number classes as - described above. A version number consists of a series of numbers, - separated by either periods or strings of letters. When comparing - version numbers, the numeric components will be compared - numerically, and the alphabetic components lexically. The following - are all valid version numbers, in no particular order: - - 1.5.1 - 1.5.2b2 - 161 - 3.10a - 8.02 - 3.4j - 1996.07.12 - 3.2.pl0 - 3.1.1.6 - 2g6 - 11g - 0.960923 - 2.2beta29 - 1.13++ - 5.5.kw - 2.0b1pl0 - - In fact, there is no such thing as an invalid version number under - this scheme; the rules for comparison are simple and predictable, - but may not always give the results you want (for some definition - of "want"). 
- """ - - component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE) - - def __init__(self, vstring=None): - if vstring: - self.parse(vstring) - - def parse(self, vstring): - # I've given up on thinking I can reconstruct the version string - # from the parsed tuple -- so I just store the string here for - # use by __str__ - self.vstring = vstring - components = [x for x in self.component_re.split(vstring) if x and x != "."] - for i, obj in enumerate(components): - try: - components[i] = int(obj) - except ValueError: - pass - - self.version = components - - def __str__(self): - return self.vstring - - def __repr__(self): - return "LooseVersion ('%s')" % str(self) - - def _cmp(self, other): - if isinstance(other, str): - other = LooseVersion(other) - elif not isinstance(other, LooseVersion): - return NotImplemented - - if self.version == other.version: - return 0 - if self.version < other.version: - return -1 - if self.version > other.version: - return 1 - - -# end class LooseVersion diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py index 81c65507e..ab3a9f073 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/acm.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py @@ -1,21 +1,8 @@ # -*- coding: utf-8 -*- -# + # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <http://www.gnu.org/licenses/>. 
-# + # Author: # - Matthew Davis <Matthew.Davis.2@team.telstra.com> # on behalf of Telstra Corporation Limited @@ -24,199 +11,239 @@ # - acm_certificate # - acm_certificate_info -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - """ Common Amazon Certificate Manager facts shared between modules """ try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass from ansible.module_utils._text import to_bytes from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .core import is_boto3_error_code -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict +from .botocore import is_boto3_error_code +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict + + +def acm_catch_boto_exception(func): + def runner(*args, **kwargs): + module = kwargs.pop("module", None) + error = kwargs.pop("error", None) + ignore_error_codes = kwargs.pop("ignore_error_codes", []) + + try: + return func(*args, **kwargs) + except is_boto3_error_code(ignore_error_codes): + return None + except (BotoCoreError, ClientError) as e: + if not module: + raise + module.fail_json_aws(e, msg=error) + + return runner -class ACMServiceManager(object): +class ACMServiceManager: """Handles ACM Facts Services""" def __init__(self, module): self.module = module - self.client = module.client('acm') - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def delete_certificate_with_backoff(self, client, arn): - client.delete_certificate(CertificateArn=arn) + self.client = module.client("acm") + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def delete_certificate_with_backoff(self, arn): + self.client.delete_certificate(CertificateArn=arn) + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def list_certificates_with_backoff(self, statuses=None): + paginator = self.client.get_paginator("list_certificates") + # `list_certificates` requires explicit key type filter, or it returns only RSA_2048 certificates + kwargs = { + "Includes": { + "keyTypes": [ + "RSA_1024", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "EC_prime256v1", + "EC_secp384r1", + "EC_secp521r1", + ], + }, + } + if statuses: + kwargs["CertificateStatuses"] = statuses + return paginator.paginate(**kwargs).build_full_result()["CertificateSummaryList"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def get_certificate_with_backoff(self, certificate_arn): + response = self.client.get_certificate(CertificateArn=certificate_arn) + # strip out response metadata + return {"Certificate": response["Certificate"], "CertificateChain": response["CertificateChain"]} + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def describe_certificate_with_backoff(self, certificate_arn): + return self.client.describe_certificate(CertificateArn=certificate_arn)["Certificate"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, 
catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def list_certificate_tags_with_backoff(self, certificate_arn): + return self.client.list_tags_for_certificate(CertificateArn=certificate_arn)["Tags"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def import_certificate_with_backoff(self, certificate, private_key, certificate_chain, arn): + params = {"Certificate": to_bytes(certificate), "PrivateKey": to_bytes(private_key)} + if arn: + params["CertificateArn"] = arn + if certificate_chain: + params["CertificateChain"] = certificate_chain - def delete_certificate(self, client, module, arn): - module.debug("Attempting to delete certificate %s" % arn) - try: - self.delete_certificate_with_backoff(client, arn) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn) - module.debug("Successfully deleted certificate %s" % arn) + return self.client.import_certificate(**params)["CertificateArn"] - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def list_certificates_with_backoff(self, client, statuses=None): - paginator = client.get_paginator('list_certificates') - kwargs = dict() - if statuses: - kwargs['CertificateStatuses'] = statuses - return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList'] + # Tags are a normal Ansible style dict + # {'Key':'Value'} + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def tag_certificate_with_backoff(self, arn, tags): + aws_tags = ansible_dict_to_boto3_tag_list(tags) + self.client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags) - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def get_certificate_with_backoff(self, client, certificate_arn): - response = client.get_certificate(CertificateArn=certificate_arn) - # strip out response metadata - return {'Certificate': response['Certificate'], - 'CertificateChain': response['CertificateChain']} - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def describe_certificate_with_backoff(self, client, certificate_arn): - return client.describe_certificate(CertificateArn=certificate_arn)['Certificate'] - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def list_certificate_tags_with_backoff(self, client, certificate_arn): - return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags'] - - # Returns a list of certificates - # if domain_name is specified, returns only certificates with that domain - # if an ARN is specified, returns only that certificate - # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return - # only certificates which contain all those tags (key exists, value matches). 
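Taken together, the acm_catch_boto_exception wrapper above replaces per-call try/except blocks: call sites pass module=, error= and optionally ignore_error_codes=, and get None back when an ignored error code is raised. A minimal sketch of the calling convention (the manager instance and ARN are hypothetical, for illustration only):

    # Returns None when the certificate has already been deleted; any other
    # BotoCoreError/ClientError becomes module.fail_json_aws(e, msg=error).
    cert = manager.describe_certificate_with_backoff(
        "arn:aws:acm:us-east-1:123456789012:certificate/example",
        module=manager.module,
        error="Couldn't obtain certificate metadata",
        ignore_error_codes=["ResourceNotFoundException"],
    )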
- def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None): + def _match_tags(self, ref_tags, cert_tags): + if ref_tags is None: + return True try: - all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't obtain certificates") - if domain_name: - certificates = [cert for cert in all_certificates - if cert['DomainName'] == domain_name] - else: - certificates = all_certificates + return all(k in cert_tags for k in ref_tags) and all(cert_tags.get(k) == ref_tags[k] for k in ref_tags) + except (TypeError, AttributeError) as e: + self.module.fail_json_aws(e, msg="ACM tag filtering err") - if arn: - # still return a list, not just one item - certificates = [c for c in certificates if c['CertificateArn'] == arn] + def delete_certificate(self, *args, arn=None): + # hacking for backward compatibility + if arn is None: + if len(args) < 3: + self.module.fail_json(msg="Missing required certificate arn to delete.") + arn = args[2] + error = f"Couldn't delete certificate {arn}" + self.delete_certificate_with_backoff(arn, module=self.module, error=error) + + def get_certificates(self, *args, domain_name=None, statuses=None, arn=None, only_tags=None, **kwargs): + """ + Returns a list of certificates + if domain_name is specified, returns only certificates with that domain + if an ARN is specified, returns only that certificate + only_tags is a dict, e.g. {'key':'value'}. If specified this function will return + only certificates which contain all those tags (key exists, value matches). + """ + all_certificates = self.list_certificates_with_backoff( + statuses=statuses, module=self.module, error="Couldn't obtain certificates" + ) + + def _filter_certificate(cert): + if domain_name and cert["DomainName"] != domain_name: + return False + if arn and cert["CertificateArn"] != arn: + return False + return True + + certificates = list(filter(_filter_certificate, all_certificates)) results = [] for certificate in certificates: - try: - cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn']) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. + cert_data = self.describe_certificate_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain certificate metadata for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if cert_data is None: continue - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName']) # in some states, ACM resources do not have a corresponding cert - if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']: - try: - cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn'])) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. 
+ if cert_data["Status"] not in ("PENDING_VALIDATION", "VALIDATION_TIMED_OUT", "FAILED"): + cert_info = self.get_certificate_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain certificate data for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if cert_info is None: continue - except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName']) + cert_data.update(cert_info) + cert_data = camel_dict_to_snake_dict(cert_data) - try: - tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn']) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. + tags = self.list_certificate_tags_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain tags for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if tags is None: continue - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName']) - cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags) + tags = boto3_tag_list_to_ansible_dict(tags) + if not self._match_tags(only_tags, tags): + continue + cert_data["tags"] = tags results.append(cert_data) - - if only_tags: - for tag_key in only_tags: - try: - results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])] - except (TypeError, AttributeError) as e: - for c in results: - if 'tags' not in c: - module.debug("cert is %s" % str(c)) - module.fail_json(msg="ACM tag filtering err", exception=e) - return results - # returns the domain name of a certificate (encoded in the public cert) - # for a given ARN - # A cert with that ARN must already exist - def get_domain_of_cert(self, client, module, arn): + def get_domain_of_cert(self, arn, **kwargs): + """ + returns the domain name of a certificate (encoded in the public cert) + for a given ARN A cert with that ARN must already exist + """ if arn is None: - module.fail(msg="Internal error with ACM domain fetching, no certificate ARN specified") - try: - cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn) - return cert_data['DomainName'] - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn): - if certificate_chain: - if arn: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateChain=to_bytes(certificate_chain), - CertificateArn=arn) - else: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateChain=to_bytes(certificate_chain)) - else: - if arn: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateArn=arn) - else: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key)) - return ret['CertificateArn'] - - # Tags are a normal Ansible style dict - # 
{'Key':'Value'} - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def tag_certificate_with_backoff(self, client, arn, tags): - aws_tags = ansible_dict_to_boto3_tag_list(tags) - client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags) - - def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None): + self.module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified") + error = f"Couldn't obtain certificate data for arn {arn}" + cert_data = self.describe_certificate_with_backoff(certificate_arn=arn, module=self.module, error=error) + return cert_data["DomainName"] + def import_certificate(self, *args, certificate, private_key, arn=None, certificate_chain=None, tags=None): original_arn = arn # upload cert - try: - arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't upload new certificate") - + params = { + "certificate": certificate, + "private_key": private_key, + "certificate_chain": certificate_chain, + "arn": arn, + "module": self.module, + "error": "Couldn't upload new certificate", + } + arn = self.import_certificate_with_backoff(**params) if original_arn and (arn != original_arn): # I'm not sure whether the API guarentees that the ARN will not change # I'm failing just in case. # If I'm wrong, I'll catch it in the integration tests. - module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn)) + self.module.fail_json(msg=f"ARN changed with ACM update, from {original_arn} to {arn}") # tag that cert try: - self.tag_certificate_with_backoff(client, arn, tags) + self.tag_certificate_with_backoff(arn, tags) except (BotoCoreError, ClientError) as e: - module.debug("Attempting to delete the cert we just created, arn=%s" % arn) try: - self.delete_certificate_with_backoff(client, arn) + self.delete_certificate_with_backoff(arn) except (BotoCoreError, ClientError): - module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.") - module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn) - module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn) + self.module.warn( + f"Certificate {arn} exists, and is not tagged. So Ansible will not see it on the next run." + ) + self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}, couldn't delete it either") + self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}") return arn diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py index ac8dfc9e0..d62b4c4d8 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/arn.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py @@ -1,35 +1,51 @@ -# +# -*- coding: utf-8 -*- + # Copyright 2017 Michael De La Rue | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) import re +def validate_aws_arn( + arn, partition=None, service=None, region=None, account_id=None, resource=None, resource_type=None, resource_id=None +): + details = parse_aws_arn(arn) + + if not details: + return False + + if partition and details.get("partition") != partition: + return False + if service and details.get("service") != service: + return False + if region and details.get("region") != region: + return False + if account_id and details.get("account_id") != account_id: + return False + if resource and details.get("resource") != resource: + return False + if resource_type and details.get("resource_type") != resource_type: + return False + if resource_id and details.get("resource_id") != resource_id: + return False + + return True + + def parse_aws_arn(arn): """ + Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html + The following are the general formats for ARNs. arn:partition:service:region:account-id:resource-id arn:partition:service:region:account-id:resource-type/resource-id arn:partition:service:region:account-id:resource-type:resource-id The specific formats depend on the resource. The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID. + + Note: resource_type handling is very naive, for complex cases it may be necessary to use + "resource" directly instead of resource_type, this will include the resource type and full ID, + including all paths. 
""" m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn) if m is None: @@ -41,6 +57,12 @@ def parse_aws_arn(arn): result.update(dict(account_id=m.group(6))) result.update(dict(resource=m.group(7))) + m2 = re.search(r"^(.*?)[:/](.+)$", m.group(7)) + if m2 is None: + result.update(dict(resource_type=None, resource_id=m.group(7))) + else: + result.update(dict(resource_type=m2.group(1), resource_id=m2.group(2))) + return result @@ -59,11 +81,11 @@ def is_outpost_arn(arn): if not details: return False - service = details.get('service') or "" - if service.lower() != 'outposts': + service = details.get("service") or "" + if service.lower() != "outposts": return False - resource = details.get('resource') or "" - if not re.match('^outpost/op-[a-f0-9]{17}$', resource): + resource = details.get("resource") or "" + if not re.match("^outpost/op-[a-f0-9]{17}$", resource): return False return True diff --git a/ansible_collections/amazon/aws/plugins/module_utils/backup.py b/ansible_collections/amazon/aws/plugins/module_utils/backup.py new file mode 100644 index 000000000..907879a8a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/backup.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +try: + import botocore +except ImportError: + pass # Handled by HAS_BOTO3 + +from typing import Union + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + + +def get_backup_resource_tags(module, backup_client, resource): + try: + response = backup_client.list_tags(ResourceArn=resource) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to list tags on the resource {resource}") + + return response["Tags"] + + +def _list_backup_plans(client, backup_plan_name): + first_iteration = False + next_token = None + + # We can not use the paginator at the moment because if was introduced after boto3 version 1.22 + # paginator = client.get_paginator("list_backup_plans") + # result = paginator.paginate(**params).build_full_result()["BackupPlansList"] + + response = client.list_backup_plans() + next_token = response.get("NextToken", None) + + if next_token is None: + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + + while next_token is not None: + if first_iteration: + response = client.list_backup_plans(NextToken=next_token) + first_iteration = True + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + next_token = response.get("NextToken") + + +def get_plan_details(module, client, backup_plan_name: str): + backup_plan_id = _list_backup_plans(client, backup_plan_name) + + if not backup_plan_id: + return [] + + try: + result = client.get_backup_plan(BackupPlanId=backup_plan_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}") + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_backup_plan = [] + + try: + resource = result.get("BackupPlanArn", None) + tag_dict = get_backup_resource_tags(module, client, resource) + result.update({"tags": tag_dict}) + except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get the backup plan tags") + + snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags")) + + # Remove AWS API response and add top-level plan name + for v in snaked_backup_plan: + if "response_metadata" in v: + del v["response_metadata"] + v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"] + + return snaked_backup_plan + + +def _list_backup_selections(client, module, plan_id): + first_iteration = False + next_token = None + selections = [] + + # We can not use the paginator at the moment because if was introduced after boto3 version 1.22 + # paginator = client.get_paginator("list_backup_selections") + # result = paginator.paginate(**params).build_full_result()["BackupSelectionsList"] + + try: + response = client.list_backup_selections(BackupPlanId=plan_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list AWS backup selections") + + next_token = response.get("NextToken", None) + + if next_token is None: + return response["BackupSelectionsList"] + + while next_token: + if first_iteration: + try: + response = client.list_backup_selections(BackupPlanId=plan_id, NextToken=next_token) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list AWS backup selections") + first_iteration = True + selections.append(response["BackupSelectionsList"]) + next_token = response.get("NextToken") + + +def _get_backup_selection(client, module, plan_id, selection_id): + try: + result = client.get_backup_selection(BackupPlanId=plan_id, SelectionId=selection_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to describe selection {selection_id}") + return result or [] + + +def get_selection_details(module, client, plan_name: str, selection_name: Union[str, list]): + result = [] + + plan = get_plan_details(module, client, plan_name) + + if not plan: + module.fail_json(msg=f"The backup plan {plan_name} does not exist. 
Please create one first.") + + plan_id = plan[0]["backup_plan_id"] + + selection_list = _list_backup_selections(client, module, plan_id) + + if selection_name: + for selection in selection_list: + if isinstance(selection_name, list): + for name in selection_name: + if selection["SelectionName"] == name: + selection_id = selection["SelectionId"] + selection_info = _get_backup_selection(client, module, plan_id, selection_id) + result.append(selection_info) + if isinstance(selection_name, str): + if selection["SelectionName"] == selection_name: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + break + else: + for selection in selection_list: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + + for v in result: + if "ResponseMetadata" in v: + del v["ResponseMetadata"] + if "BackupSelection" in v: + for backup_selection_key in v["BackupSelection"]: + v[backup_selection_key] = v["BackupSelection"][backup_selection_key] + del v["BackupSelection"] + + return result diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py index c27214519..47281307e 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/batch.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# + """ This module adds shared support for Batch modules. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict @@ -43,7 +42,7 @@ def cc(key): :param key: :return: """ - components = key.split('_') + components = key.split("_") return components[0] + "".join([token.capitalize() for token in components[1:]]) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py index a8a014c20..858e4e593 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -31,9 +33,6 @@ A set of helper functions designed to help with initializing boto3/botocore connections. 
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json import os import traceback @@ -42,19 +41,43 @@ BOTO3_IMP_ERR = None try: import boto3 import botocore + HAS_BOTO3 = True except ImportError: BOTO3_IMP_ERR = traceback.format_exc() HAS_BOTO3 = False +try: + from packaging.version import Version + + HAS_PACKAGING = True +except ImportError: + HAS_PACKAGING = False + from ansible.module_utils._text import to_native from ansible.module_utils.ansible_release import __version__ from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.six import binary_type from ansible.module_utils.six import text_type +from .common import get_collection_info +from .exceptions import AnsibleBotocoreError from .retries import AWSRetry +MINIMUM_BOTOCORE_VERSION = "1.29.0" +MINIMUM_BOTO3_VERSION = "1.26.0" + + +def _get_user_agent_string(): + info = get_collection_info() + result = f"APN/1.0 Ansible/{__version__}" + if info["name"]: + if info["version"] is not None: + result += f" {info['name']}/{info['version']}" + else: + result += f" {info['name']}" + return result + def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): """ @@ -68,13 +91,35 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) except ValueError as e: - module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e)) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, - botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e: - module.fail_json(msg=to_native(e)) + module.fail_json( + msg=f"Couldn't connect to AWS: {to_native(e)}", + ) + except ( + botocore.exceptions.ProfileNotFound, + botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, + botocore.exceptions.ConfigParseError, + ) as e: + module.fail_json( + msg=to_native(e), + ) except botocore.exceptions.NoRegionError: - module.fail_json(msg="The %s module requires a region and none was found in configuration, " - "environment variables or module parameters" % module._name) + module.fail_json( + msg=f"The {module._name} module requires a region and none was found in configuration, " + "environment variables or module parameters", + ) + + +def _merge_botocore_config(config_a, config_b): + """ + Merges the extra configuration options from config_b into config_a. + Supports both botocore.config.Config objects and dicts + """ + if not config_b: + return config_a + if not isinstance(config_b, botocore.config.Config): + config_b = botocore.config.Config(**config_b) + return config_a.merge(config_b) def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): @@ -82,22 +127,23 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par Builds a boto3 resource/client connection cleanly wrapping the most common failures. No exceptions are caught/handled. """ - profile = params.pop('profile_name', None) - - if conn_type not in ['both', 'resource', 'client']: - raise ValueError('There is an issue in the calling code. 
You ' - 'must specify either both, resource, or client to ' - 'the conn_type parameter in the boto3_conn function ' - 'call') + profile = params.pop("profile_name", None) + + if conn_type not in ["both", "resource", "client"]: + raise ValueError( + "There is an issue in the calling code. You " + "must specify either both, resource, or client to " + "the conn_type parameter in the boto3_conn function " + "call" + ) + # default config with user agent config = botocore.config.Config( - user_agent_extra='Ansible/{0}'.format(__version__), + user_agent=_get_user_agent_string(), ) - if params.get('config') is not None: - config = config.merge(params.pop('config')) - if params.get('aws_config') is not None: - config = config.merge(params.pop('aws_config')) + for param in ("config", "aws_config"): + config = _merge_botocore_config(config, params.pop(param, None)) session = boto3.session.Session( profile_name=profile, @@ -105,13 +151,13 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par enable_placebo(session) - if conn_type == 'resource': + if conn_type == "resource": return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) - elif conn_type == 'client': + elif conn_type == "client": return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) else: - client = session.client(resource, region_name=region, endpoint_url=endpoint, **params) - resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params) + client = session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + resource = session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) return client, resource @@ -127,106 +173,77 @@ def boto_exception(err): :param err: Exception from boto :return: Error message """ - if hasattr(err, 'error_message'): + if hasattr(err, "error_message"): error = err.error_message - elif hasattr(err, 'message'): - error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err)) + elif hasattr(err, "message"): + error = str(err.message) + " " + str(err) + " - " + str(type(err)) else: - error = '%s: %s' % (Exception, err) + error = f"{Exception}: {err}" return error -def get_aws_region(module, boto3=None): - region = module.params.get('region') +def _aws_region(params): + region = params.get("region") if region: return region if not HAS_BOTO3: - module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR) + raise AnsibleBotocoreError(message=missing_required_lib("boto3 and botocore"), exception=BOTO3_IMP_ERR) # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. try: - profile_name = module.params.get('profile') - return botocore.session.Session(profile=profile_name).get_config_variable('region') + # Botocore doesn't like empty strings, make sure we default to None in the case of an empty + # string. 
+ profile_name = params.get("profile") or None + return botocore.session.Session(profile=profile_name).get_config_variable("region") except botocore.exceptions.ProfileNotFound: return None -def get_aws_connection_info(module, boto3=None): - - # Check module args for credentials, then check environment vars - # access_key - - endpoint_url = module.params.get('endpoint_url') - access_key = module.params.get('access_key') - secret_key = module.params.get('secret_key') - session_token = module.params.get('session_token') - region = get_aws_region(module) - profile_name = module.params.get('profile') - validate_certs = module.params.get('validate_certs') - ca_bundle = module.params.get('aws_ca_bundle') - config = module.params.get('aws_config') - - # Only read the profile environment variables if we've *not* been passed - # any credentials as parameters. - if not profile_name and not access_key and not secret_key: - if os.environ.get('AWS_PROFILE'): - profile_name = os.environ.get('AWS_PROFILE') - if os.environ.get('AWS_DEFAULT_PROFILE'): - profile_name = os.environ.get('AWS_DEFAULT_PROFILE') - +def get_aws_region(module, boto3=None): + try: + return _aws_region(module.params) + except AnsibleBotocoreError as e: + if e.exception: + module.fail_json(msg=e.message, exception=e.exception) + else: + module.fail_json(msg=e.message) + + +def _aws_connection_info(params): + endpoint_url = params.get("endpoint_url") + access_key = params.get("access_key") + secret_key = params.get("secret_key") + session_token = params.get("session_token") + region = _aws_region(params) + profile_name = params.get("profile") + validate_certs = params.get("validate_certs") + ca_bundle = params.get("aws_ca_bundle") + config = params.get("aws_config") + + # Caught here so that they can be deliberately set to '' to avoid conflicts when environment + # variables are also being used if profile_name and (access_key or secret_key or session_token): - module.fail_json(msg="Passing both a profile and access tokens is not supported.") + raise AnsibleBotocoreError(message="Passing both a profile and access tokens is not supported.") # Botocore doesn't like empty strings, make sure we default to None in the case of an empty # string. 
    if not access_key:
-        # AWS_ACCESS_KEY_ID is the one supported by the AWS CLI
-        # AWS_ACCESS_KEY is to match up with our parameter name
-        if os.environ.get('AWS_ACCESS_KEY_ID'):
-            access_key = os.environ['AWS_ACCESS_KEY_ID']
-        elif os.environ.get('AWS_ACCESS_KEY'):
-            access_key = os.environ['AWS_ACCESS_KEY']
-        # Deprecated - 'EC2' implies just EC2, but is global
-        elif os.environ.get('EC2_ACCESS_KEY'):
-            access_key = os.environ['EC2_ACCESS_KEY']
-        else:
-            # in case access_key came in as empty string
-            access_key = None
-
+        access_key = None
    if not secret_key:
-        # AWS_SECRET_ACCESS_KEY is the one supported by the AWS CLI
-        # AWS_SECRET_KEY is to match up with our parameter name
-        if os.environ.get('AWS_SECRET_ACCESS_KEY'):
-            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
-        elif os.environ.get('AWS_SECRET_KEY'):
-            secret_key = os.environ['AWS_SECRET_KEY']
-        # Deprecated - 'EC2' implies just EC2, but is global
-        elif os.environ.get('EC2_SECRET_KEY'):
-            secret_key = os.environ['EC2_SECRET_KEY']
-        else:
-            # in case secret_key came in as empty string
-            secret_key = None
-
+        secret_key = None
    if not session_token:
-        # AWS_SESSION_TOKEN is supported by the AWS CLI
-        if os.environ.get('AWS_SESSION_TOKEN'):
-            session_token = os.environ['AWS_SESSION_TOKEN']
-        # Deprecated - boto
-        elif os.environ.get('AWS_SECURITY_TOKEN'):
-            session_token = os.environ['AWS_SECURITY_TOKEN']
-        # Deprecated - 'EC2' implies just EC2, but is global
-        elif os.environ.get('EC2_SECURITY_TOKEN'):
-            session_token = os.environ['EC2_SECURITY_TOKEN']
-        else:
-            # in case secret_token came in as empty string
-            session_token = None
+        session_token = None

    if profile_name:
-        boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
-        boto_params['profile_name'] = profile_name
+        boto_params = dict(
+            aws_access_key_id=None,
+            aws_secret_access_key=None,
+            aws_session_token=None,
+            profile_name=profile_name,
+        )
    else:
        boto_params = dict(
            aws_access_key_id=access_key,
@@ -235,20 +252,30 @@ def get_aws_connection_info(module, boto3=None):
    )

    if validate_certs and ca_bundle:
-        boto_params['verify'] = ca_bundle
+        boto_params["verify"] = ca_bundle
    else:
-        boto_params['verify'] = validate_certs
+        boto_params["verify"] = validate_certs

    if config is not None:
-        boto_params['aws_config'] = botocore.config.Config(**config)
+        boto_params["aws_config"] = botocore.config.Config(**config)

    for param, value in boto_params.items():
        if isinstance(value, binary_type):
-            boto_params[param] = text_type(value, 'utf-8', 'strict')
+            boto_params[param] = text_type(value, "utf-8", "strict")

    return region, endpoint_url, boto_params


+def get_aws_connection_info(module, boto3=None):
+    try:
+        return _aws_connection_info(module.params)
+    except AnsibleBotocoreError as e:
+        if e.exception:
+            module.fail_json(msg=e.message, exception=e.exception)
+        else:
+            module.fail_json(msg=e.message)
+
+
def _paginated_query(client, paginator_name, **params):
    paginator = client.get_paginator(paginator_name)
    result = paginator.paginate(**params).build_full_result()
@@ -282,10 +309,11 @@ def gather_sdk_versions():
    """
    if not HAS_BOTO3:
        return {}
-    import boto3
-    import botocore
-    return dict(boto3_version=boto3.__version__,
-                botocore_version=botocore.__version__)
+
+    return dict(
+        boto3_version=boto3.__version__,
+        botocore_version=botocore.__version__,
+    )


def is_boto3_error_code(code, e=None):
@@ -302,14 +330,16 @@ def is_boto3_error_code(code, e=None):
    # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
+
    if e is None:
        import sys
+
        dummy, e, dummy = sys.exc_info()
    if not isinstance(code, list):
        code = [code]
-    if isinstance(e, ClientError) and e.response['Error']['Code'] in code:
+    if isinstance(e, ClientError) and e.response["Error"]["Code"] in code:
        return ClientError
-    return type('NeverEverRaisedException', (Exception,), {})
+    return type("NeverEverRaisedException", (Exception,), {})


def is_boto3_error_message(msg, e=None):
@@ -326,12 +356,14 @@ def is_boto3_error_message(msg, e=None):
    # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
+
    if e is None:
        import sys
+
        dummy, e, dummy = sys.exc_info()
-    if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
+    if isinstance(e, ClientError) and msg in e.response["Error"]["Message"]:
        return ClientError
-    return type('NeverEverRaisedException', (Exception,), {})
+    return type("NeverEverRaisedException", (Exception,), {})


def get_boto3_client_method_parameters(client, method_name, required=False):
@@ -348,7 +380,7 @@ def get_boto3_client_method_parameters(client, method_name, required=False):
# Used by normalize_boto3_result
def _boto3_handler(obj):
-    if hasattr(obj, 'isoformat'):
+    if hasattr(obj, "isoformat"):
        return obj.isoformat()
    else:
        return obj
@@ -371,6 +403,7 @@ def enable_placebo(session):
    """
    if "_ANSIBLE_PLACEBO_RECORD" in os.environ:
        import placebo
+
        existing_entries = os.listdir(os.environ["_ANSIBLE_PLACEBO_RECORD"])
        idx = len(existing_entries)
        data_path = f"{os.environ['_ANSIBLE_PLACEBO_RECORD']}/{idx}"
@@ -379,10 +412,12 @@ def enable_placebo(session):
    if "_ANSIBLE_PLACEBO_REPLAY" in os.environ:
        import shutil
+
+        import placebo
+
        existing_entries = sorted([int(i) for i in os.listdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])])
        idx = str(existing_entries[0])
-        data_path = os.environ['_ANSIBLE_PLACEBO_REPLAY'] + "/" + idx
+        data_path = os.environ["_ANSIBLE_PLACEBO_REPLAY"] + "/" + idx
        try:
            shutil.rmtree("_tmp")
        except FileNotFoundError:
@@ -392,3 +427,73 @@ def enable_placebo(session):
    os.rmdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])
    pill = placebo.attach(session, data_path="_tmp")
    pill.playback()
+
+
+def check_sdk_version_supported(botocore_version=None, boto3_version=None, warn=None):
+    """Checks to see if the available boto3 / botocore versions are supported
+    args:
+        botocore_version: (str) overrides the minimum version of botocore supported by the collection
+        boto3_version: (str) overrides the minimum version of boto3 supported by the collection
+        warn: (Callable) invoked with a string message if boto3/botocore are less than the
+            supported versions
+    raises:
+        AnsibleBotocoreError - If botocore/boto3 is missing
+    returns
+        False if boto3 or botocore is less than the minimum supported versions
+        True if boto3 and botocore are greater than or equal to the minimum supported versions
+    """
+
+    botocore_version = botocore_version or MINIMUM_BOTOCORE_VERSION
+    boto3_version = boto3_version or MINIMUM_BOTO3_VERSION
+
+    if not HAS_BOTO3:
+        raise AnsibleBotocoreError(message=missing_required_lib("botocore and boto3"))
+
+    supported = True
+
+    if not HAS_PACKAGING:
+        if warn:
+            warn("packaging.version Python module not installed, unable to check AWS SDK versions")
+        return True
+    if not botocore_at_least(botocore_version):
+        supported = False
+        if warn:
+            warn(f"botocore < {MINIMUM_BOTOCORE_VERSION} is not supported or tested. Some features may not work.")
+    if not boto3_at_least(boto3_version):
+        supported = False
+        if warn:
+            warn(f"boto3 < {MINIMUM_BOTO3_VERSION} is not supported or tested. Some features may not work.")
+
+    return supported
+
+
+def _version_at_least(a, b):
+    if not HAS_PACKAGING:
+        return True
+    return Version(a) >= Version(b)
+
+
+def boto3_at_least(desired):
+    """Check if the available boto3 version is greater than or equal to a desired version.
+
+    Usage:
+        if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+            # conditionally fail on old boto3 versions if a specific feature is not supported
+            module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+    """
+    existing = gather_sdk_versions()
+    return _version_at_least(existing["boto3_version"], desired)
+
+
+def botocore_at_least(desired):
+    """Check if the available botocore version is greater than or equal to a desired version.

+    Usage:
+        if not module.botocore_at_least('1.2.3'):
+            module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+        if not module.botocore_at_least('1.5.3'):
+            module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+                        'To wait until Service X resources are fully available, update botocore.')
+    """
+    existing = gather_sdk_versions()
+    return _version_at_least(existing["botocore_version"], desired)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
index e690c0a86..4b2775cb3 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 Ansible Project
#
# This code is part of Ansible, but is an independent component.
@@ -24,15 +26,10 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type

-import time
import functools
import random

-import ansible.module_utils.common.warnings as ansible_warnings
+import time


class BackoffIterator:
@@ -62,7 +59,9 @@ class BackoffIterator:
        return return_value


-def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class):
+def _retry_func(
+    func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class
+):
    counter = 0
    for sleep_time in sleep_time_generator:
        try:
@@ -108,6 +107,7 @@ class CloudRetry:
            else:
                # iterable
                return True
+
        return _is_iterable() and response_code in catch_extra_error_codes

    @classmethod
@@ -125,7 +125,9 @@ class CloudRetry:
                status_code_from_except_f=status_code_from_exception,
                base_class=cls.base_class,
            )
+
            return _retry_wrapper
+
        return retry_decorator

    @classmethod
@@ -179,35 +181,3 @@ class CloudRetry:
            catch_extra_error_codes=catch_extra_error_codes,
            sleep_time_generator=sleep_time_generator,
        )
-
-    @classmethod
-    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
-        """
-        Wrap a callable with retry behavior.
-        Developers should use CloudRetry.exponential_backoff instead.
- This method has been deprecated and will be removed in release 6.0.0, consider using exponential_backoff method instead. - Args: - retries (int): Number of times to retry a failed request before giving up - default=10 - delay (int or float): Initial delay between retries in seconds - default=3 - backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry - default=1.1 - catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry - default=None - Returns: - Callable: A generator that calls the decorated function using an exponential backoff. - """ - # This won't emit a warning (we don't have the context available to us), but will trigger - # sanity failures as we prepare for 6.0.0 - ansible_warnings.deprecate( - 'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead', - version='6.0.0', collection_name='amazon.aws') - - return cls.exponential_backoff( - retries=tries, - delay=delay, - backoff=backoff, - max_delay=None, - catch_extra_error_codes=catch_extra_error_codes, - ) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py index c628bff14..342adc82d 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py @@ -1,20 +1,8 @@ # -*- coding: utf-8 -*- -# + # Copyright (c) 2017 Willem van Ketwich -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see <http://www.gnu.org/licenses/>. 
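Callers of the CloudRetry.backoff classmethod removed above are expected to move to exponential_backoff, whose keyword is retries rather than tries. A minimal migration sketch, assuming the collection's AWSRetry subclass of CloudRetry (describe_widget is a hypothetical client call, for illustration only):

    @AWSRetry.exponential_backoff(retries=10, delay=3, backoff=1.1,
                                  catch_extra_error_codes=["RequestInProgressException"])
    def describe_widget(client, widget_id):
        return client.describe_widget(WidgetId=widget_id)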
-# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + # Author: # - Willem van Ketwich <willem@vanketwich.com.au> # @@ -22,116 +10,147 @@ # - cloudfront_distribution # - cloudfront_invalidation # - cloudfront_origin_access_identity + """ Common cloudfront facts shared between modules """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from functools import partial try: import botocore except ImportError: pass -from .ec2 import AWSRetry -from .ec2 import boto3_tag_list_to_ansible_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from .retries import AWSRetry +from .tagging import boto3_tag_list_to_ansible_dict -class CloudFrontFactsServiceManager(object): - """Handles CloudFront Facts Services""" - def __init__(self, module): - self.module = module - self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) +class CloudFrontFactsServiceManagerFailure(Exception): + pass - def get_distribution(self, distribution_id): - try: - return self.client.get_distribution(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing distribution") - def get_distribution_config(self, distribution_id): - try: - return self.client.get_distribution_config(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing distribution configuration") +def cloudfront_facts_keyed_list_helper(list_to_key): + result = dict() + for item in list_to_key: + distribution_id = item["Id"] + if "Items" in item["Aliases"]: + result.update({alias: item for alias in item["Aliases"]["Items"]}) + result.update({distribution_id: item}) + return result - def get_origin_access_identity(self, origin_access_identity_id): - try: - return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity") - def get_origin_access_identity_config(self, origin_access_identity_id): - try: - return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") +@AWSRetry.jittered_backoff() +def _cloudfront_paginate_build_full_result(client, client_method, **kwargs): + paginator = client.get_paginator(client_method) + return paginator.paginate(**kwargs).build_full_result() - def get_invalidation(self, distribution_id, invalidation_id): - try: - return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing invalidation") - def get_streaming_distribution(self, distribution_id): - try: - return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - def get_streaming_distribution_config(self, distribution_id): - try: - return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - 
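The new cloudfront_facts_keyed_list_helper above re-keys a distribution list so entries can be looked up by distribution ID or by any alias. A small sketch of the resulting shape (data invented for illustration):

    dist = {"Id": "E2EXAMPLE", "Aliases": {"Items": ["cdn.example.com"]}}
    keyed = cloudfront_facts_keyed_list_helper([dist])
    assert keyed["E2EXAMPLE"] is dist
    assert keyed["cdn.example.com"] is dist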
- def list_origin_access_identities(self): - try: - paginator = self.client.get_paginator('list_cloud_front_origin_access_identities') - result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {}) - return result.get('Items', []) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities") - - def list_distributions(self, keyed=True): - try: - paginator = self.client.get_paginator('list_distributions') - result = paginator.paginate().build_full_result().get('DistributionList', {}) - distribution_list = result.get('Items', []) - if not keyed: - return distribution_list - return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing distributions") - - def list_distributions_by_web_acl_id(self, web_acl_id): - try: - result = self.client.list_distributions_by_web_acl_id(WebAclId=web_acl_id, aws_retry=True) - distribution_list = result.get('DistributionList', {}).get('Items', []) - return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") +class CloudFrontFactsServiceManager: + """Handles CloudFront Facts Services""" - def list_invalidations(self, distribution_id): - try: - paginator = self.client.get_paginator('list_invalidations') - result = paginator.paginate(DistributionId=distribution_id).build_full_result() - return result.get('InvalidationList', {}).get('Items', []) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing invalidations") + CLOUDFRONT_CLIENT_API_MAPPING = { + "get_distribution": { + "error": "Error describing distribution", + }, + "get_distribution_config": { + "error": "Error describing distribution configuration", + }, + "get_origin_access_identity": { + "error": "Error describing origin access identity", + "client_api": "get_cloud_front_origin_access_identity", + }, + "get_origin_access_identity_config": { + "error": "Error describing origin access identity configuration", + "client_api": "get_cloud_front_origin_access_identity_config", + }, + "get_streaming_distribution": { + "error": "Error describing streaming distribution", + }, + "get_streaming_distribution_config": { + "error": "Error describing streaming distribution", + }, + "get_invalidation": { + "error": "Error describing invalidation", + }, + "list_distributions_by_web_acl_id": { + "error": "Error listing distributions by web acl id", + "post_process": lambda x: cloudfront_facts_keyed_list_helper( + x.get("DistributionList", {}).get("Items", []) + ), + }, + } + + CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING = { + "list_origin_access_identities": { + "error": "Error listing cloud front origin access identities", + "client_api": "list_cloud_front_origin_access_identities", + "key": "CloudFrontOriginAccessIdentityList", + }, + "list_distributions": { + "error": "Error listing distributions", + "key": "DistributionList", + "keyed": True, + }, + "list_invalidations": {"error": "Error listing invalidations", "key": "InvalidationList"}, + "list_streaming_distributions": { + "error": "Error listing streaming distributions", + "key": "StreamingDistributionList", + "keyed": True, + }, + } - def list_streaming_distributions(self, keyed=True): - try: - paginator = self.client.get_paginator('list_streaming_distributions') - result = paginator.paginate().build_full_result() - 
streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', []) - if not keyed: - return streaming_distribution_list - return self.keyed_list_helper(streaming_distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing streaming distributions") + def __init__(self, module): + self.module = module + self.client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff()) + + def describe_cloudfront_property(self, client_method, error, post_process, **kwargs): + fail_if_error = kwargs.pop("fail_if_error", True) + try: + method = getattr(self.client, client_method) + api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True) + result = method(aws_retry=True, **api_kwargs) + result.pop("ResponseMetadata", None) + if post_process: + result = post_process(result) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not fail_if_error: + raise + self.module.fail_json_aws(e, msg=error) + + def paginate_list_cloudfront_property(self, client_method, key, default_keyed, error, **kwargs): + fail_if_error = kwargs.pop("fail_if_error", True) + try: + keyed = kwargs.pop("keyed", default_keyed) + api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True) + result = _cloudfront_paginate_build_full_result(self.client, client_method, **api_kwargs) + items = result.get(key, {}).get("Items", []) + if keyed: + items = cloudfront_facts_keyed_list_helper(items) + return items + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not fail_if_error: + raise + self.module.fail_json_aws(e, msg=error) + + def __getattr__(self, name): + if name in self.CLOUDFRONT_CLIENT_API_MAPPING: + client_method = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("client_api", name) + error = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("error", "") + post_process = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("post_process") + return partial(self.describe_cloudfront_property, client_method, error, post_process) + + elif name in self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING: + client_method = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("client_api", name) + error = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("error", "") + key = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("key") + keyed = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("keyed", False) + return partial(self.paginate_list_cloudfront_property, client_method, key, keyed, error) + + raise CloudFrontFactsServiceManagerFailure(f"Method {name} is not currently supported") def summary(self): summary_dict = {} @@ -142,36 +161,38 @@ class CloudFrontFactsServiceManager(object): def summary_get_origin_access_identity_list(self): try: - origin_access_identity_list = {'origin_access_identities': []} - origin_access_identities = self.list_origin_access_identities() - for origin_access_identity in origin_access_identities: - oai_id = origin_access_identity['Id'] + origin_access_identities = [] + for origin_access_identity in self.list_origin_access_identities(): + oai_id = origin_access_identity["Id"] oai_full_response = self.get_origin_access_identity(oai_id) - oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} - origin_access_identity_list['origin_access_identities'].append(oai_summary) - return origin_access_identity_list + oai_summary = {"Id": oai_id, "ETag": oai_full_response["ETag"]} + origin_access_identities.append(oai_summary) + 
return {"origin_access_identities": origin_access_identities} except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") + def list_resource_tags(self, resource_arn): + return self.client.list_tags_for_resource(Resource=resource_arn, aws_retry=True) + def summary_get_distribution_list(self, streaming=False): try: - list_name = 'streaming_distributions' if streaming else 'distributions' - key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] + list_name = "streaming_distributions" if streaming else "distributions" + key_list = ["Id", "ARN", "Status", "LastModifiedTime", "DomainName", "Comment", "PriceClass", "Enabled"] distribution_list = {list_name: []} - distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False) + distributions = ( + self.list_streaming_distributions(keyed=False) if streaming else self.list_distributions(keyed=False) + ) for dist in distributions: - temp_distribution = {} - for key_name in key_list: - temp_distribution[key_name] = dist[key_name] - temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', [])) - temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) + temp_distribution = {k: dist[k] for k in key_list} + temp_distribution["Aliases"] = list(dist["Aliases"].get("Items", [])) + temp_distribution["ETag"] = self.get_etag_from_distribution_id(dist["Id"], streaming) if not streaming: - temp_distribution['WebACLId'] = dist['WebACLId'] - invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) + temp_distribution["WebACLId"] = dist["WebACLId"] + invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist["Id"]) if invalidation_ids: - temp_distribution['Invalidations'] = invalidation_ids - resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True) - temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) + temp_distribution["Invalidations"] = invalidation_ids + resource_tags = self.list_resource_tags(dist["ARN"]) + temp_distribution["Tags"] = boto3_tag_list_to_ansible_dict(resource_tags["Tags"].get("Items", [])) distribution_list[list_name].append(temp_distribution) return distribution_list except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -180,50 +201,32 @@ class CloudFrontFactsServiceManager(object): def get_etag_from_distribution_id(self, distribution_id, streaming): distribution = {} if not streaming: - distribution = self.get_distribution(distribution_id) + distribution = self.get_distribution(id=distribution_id) else: - distribution = self.get_streaming_distribution(distribution_id) - return distribution['ETag'] + distribution = self.get_streaming_distribution(id=distribution_id) + return distribution["ETag"] def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): try: - invalidation_ids = [] - invalidations = self.list_invalidations(distribution_id) - for invalidation in invalidations: - invalidation_ids.append(invalidation['Id']) - return invalidation_ids + return list(map(lambda x: x["Id"], self.list_invalidations(distribution_id=distribution_id))) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") def get_distribution_id_from_domain_name(self, domain_name): try: distribution_id = "" - distributions = 
self.list_distributions(False) - distributions += self.list_streaming_distributions(False) + distributions = self.list_distributions(keyed=False) + distributions += self.list_streaming_distributions(keyed=False) for dist in distributions: - if 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - if str(alias).lower() == domain_name.lower(): - distribution_id = dist['Id'] - break + if any(str(alias).lower() == domain_name.lower() for alias in dist["Aliases"].get("Items", [])): + distribution_id = dist["Id"] return distribution_id except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") def get_aliases_from_distribution_id(self, distribution_id): try: - distribution = self.get_distribution(distribution_id) - return distribution['DistributionConfig']['Aliases'].get('Items', []) + distribution = self.get_distribution(id=distribution_id) + return distribution["Distribution"]["DistributionConfig"]["Aliases"].get("Items", []) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") - - def keyed_list_helper(self, list_to_key): - keyed_list = dict() - for item in list_to_key: - distribution_id = item['Id'] - if 'Items' in item['Aliases']: - aliases = item['Aliases']['Items'] - for alias in aliases: - keyed_list.update({alias: item}) - keyed_list.update({distribution_id: item}) - return keyed_list diff --git a/ansible_collections/amazon/aws/plugins/module_utils/common.py b/ansible_collections/amazon/aws/plugins/module_utils/common.py new file mode 100644 index 000000000..673915725 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/common.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2022, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +AMAZON_AWS_COLLECTION_NAME = "amazon.aws" +AMAZON_AWS_COLLECTION_VERSION = "7.4.0" + + +_collection_info_context = { + "name": AMAZON_AWS_COLLECTION_NAME, + "version": AMAZON_AWS_COLLECTION_VERSION, +} + + +def set_collection_info(collection_name=None, collection_version=None): + if collection_name: + _collection_info_context["name"] = collection_name + if collection_version: + _collection_info_context["version"] = collection_version + + +def get_collection_info(): + return _collection_info_context diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py index bfd7fe101..44fd1d80b 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/core.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py @@ -1,27 +1,14 @@ -# +# -*- coding: utf-8 -*- + # Copyright 2017 Michael De La Rue | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
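The heart of the CloudFrontFactsServiceManager rewrite above is the __getattr__ hook: instead of one near-identical method per CloudFront API call, unknown attribute lookups are resolved against the two declarative mappings and bound to a shared worker with functools.partial. A stripped-down, illustrative sketch of the pattern (FactsManager and _describe are hypothetical names; the stub stands in for the real boto3 call and fail_json_aws error handling):

    from functools import partial

    class FactsManager:
        API_MAPPING = {
            "get_distribution": {"error": "Error describing distribution"},
            "get_invalidation": {"error": "Error describing invalidation"},
        }

        def _describe(self, client_method, error, **kwargs):
            # Stand-in for describe_cloudfront_property(): the real code calls
            # the boto3 client here and fails the module with `error` on
            # ClientError/BotoCoreError.
            return {"called": client_method, "kwargs": kwargs, "on_error": error}

        def __getattr__(self, name):
            # Only reached when normal attribute lookup fails, so real methods
            # like summary() are unaffected.
            if name in self.API_MAPPING:
                return partial(self._describe, name, self.API_MAPPING[name]["error"])
            raise AttributeError(name)

    facts = FactsManager()
    print(facts.get_distribution(id="E123"))
    # {'called': 'get_distribution', 'kwargs': {'id': 'E123'}, 'on_error': 'Error describing distribution'}

One consequence visible later in this patch: callers now pass keyword arguments (get_distribution(id=...) rather than get_distribution(distribution_id)), because the shared worker converts its kwargs to CamelCase API parameters via snake_dict_to_camel_dict.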
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """This module adds shared support for generic Amazon AWS modules In order to use this module, include it as part of a custom module as shown below. - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean mutually_exclusive=list1, required_together=list2) @@ -50,19 +37,19 @@ The call will be retried the specified number of times, so the calling functions don't need to be wrapped in the backoff decorator. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn from .arn import parse_aws_arn # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore from .botocore import HAS_BOTO3 # pylint: disable=unused-import +from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import from .botocore import is_boto3_error_code # pylint: disable=unused-import from .botocore import is_boto3_error_message # pylint: disable=unused-import -from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import from .botocore import normalize_boto3_result # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions +from .exceptions import AnsibleAWSError # pylint: disable=unused-import + # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules from .modules import AnsibleAWSModule # pylint: disable=unused-import @@ -70,8 +57,4 @@ from .modules import AnsibleAWSModule # pylint: disable=unused-import from .transformation import scrub_none_parameters # pylint: disable=unused-import # We will also export HAS_BOTO3 so end user modules can use it. -__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message') - - -class AnsibleAWSError(Exception): - pass +__all__ = ("AnsibleAWSModule", "HAS_BOTO3", "is_boto3_error_code", "is_boto3_error_message") diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py index abcbcfd23..8fdaf94b8 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# + """ This module adds shared support for Direct Connect modules. 
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import traceback try: @@ -39,7 +38,7 @@ try: except ImportError: pass -from .ec2 import AWSRetry +from .retries import AWSRetry class DirectConnectError(Exception): @@ -53,37 +52,41 @@ def delete_connection(client, connection_id): try: AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to delete DirectConnection {connection_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def associate_connection_and_lag(client, connection_id, lag_id): try: - AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, - lagId=lag_id) + AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}" - " with link aggregation group {1}.".format(connection_id, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to associate Direct Connect connection {connection_id} with link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def disassociate_connection_and_lag(client, connection_id, lag_id): try: - AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, - lagId=lag_id) + AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}" - " from link aggregation group {1}.".format(connection_id, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to disassociate Direct Connect connection {connection_id} from link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def delete_virtual_interface(client, virtual_interface): try: AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Could not delete virtual interface {virtual_interface}", + last_traceback=traceback.format_exc(), + exception=e, + ) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py index 817c12298..afe8208f5 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -35,21 +37,19 @@ lived here. Most of these functions were not specific to EC2, they ended up in this module because "that's where the AWS code was" (originally). 
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import re from ansible.module_utils.ansible_release import __version__ -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types + # Used to live here, moved into ansible.module_utils.common.dict_transformations from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import +from ansible.module_utils.six import integer_types +from ansible.module_utils.six import string_types -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.arn +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore @@ -57,19 +57,26 @@ from .botocore import HAS_BOTO3 # pylint: disable=unused-import from .botocore import boto3_conn # pylint: disable=unused-import from .botocore import boto3_inventory_conn # pylint: disable=unused-import from .botocore import boto_exception # pylint: disable=unused-import -from .botocore import get_aws_region # pylint: disable=unused-import from .botocore import get_aws_connection_info # pylint: disable=unused-import - +from .botocore import get_aws_region # pylint: disable=unused-import from .botocore import paginated_query_with_retries -# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore -from .core import AnsibleAWSError # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions +from .exceptions import AnsibleAWSError # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules # The names have been changed in .modules to better reflect their applicability. 
from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy +from .policy import _py3cmp as py3cmp # pylint: disable=unused-import +from .policy import compare_policies # pylint: disable=unused-import +from .policy import sort_json_policy_dict # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries +from .retries import AWSRetry # pylint: disable=unused-import + # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import @@ -79,14 +86,6 @@ from .tagging import compare_aws_tags # pylint: disable=unused-import from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import from .transformation import map_complex_type # pylint: disable=unused-import -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.policy -from .policy import _py3cmp as py3cmp # pylint: disable=unused-import -from .policy import compare_policies # pylint: disable=unused-import -from .policy import sort_json_policy_dict # pylint: disable=unused-import - -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.retries -from .retries import AWSRetry # pylint: disable=unused-import - try: import botocore except ImportError: @@ -94,18 +93,17 @@ except ImportError: def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None): - - """ Return list of security group IDs from security group names. Note that security group names are not unique - across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This - will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in - a try block - """ + """Return list of security group IDs from security group names. Note that security group names are not unique + across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. 
This + will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in + a try block + """ def get_sg_name(sg, boto3=None): - return str(sg['GroupName']) + return str(sg["GroupName"]) def get_sg_id(sg, boto3=None): - return str(sg['GroupId']) + return str(sg["GroupId"]) sec_group_id_list = [] @@ -116,25 +114,25 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id if vpc_id: filters = [ { - 'Name': 'vpc-id', - 'Values': [ + "Name": "vpc-id", + "Values": [ vpc_id, - ] + ], } ] - all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] + all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)["SecurityGroups"] else: - all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] + all_sec_groups = ec2_connection.describe_security_groups()["SecurityGroups"] unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) sec_group_name_list = list(set(sec_group_list) - set(unmatched)) if len(unmatched) > 0: # If we have unmatched names that look like an ID, assume they are - sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] - still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] + sec_group_id_list[:] = [sg for sg in unmatched if re.match("sg-[a-fA-F0-9]+$", sg)] + still_unmatched = [sg for sg in unmatched if not re.match("sg-[a-fA-F0-9]+$", sg)] if len(still_unmatched) > 0: - raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) + raise ValueError(f"The following group names are not valid: {', '.join(still_unmatched)}") sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list] @@ -162,13 +160,11 @@ def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None): try: tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set) - AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( - client.create_tags - )( + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.create_tags)( Resources=[resource_id], Tags=tags_to_add ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id)) + module.fail_json_aws(e, msg=f"Unable to add tags {tags_to_set} to {resource_id}") return True @@ -194,13 +190,11 @@ def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset] try: - AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( - client.delete_tags - )( + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.delete_tags)( Resources=[resource_id], Tags=tags_to_remove ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id)) + module.fail_json_aws(e, msg=f"Unable to delete tags {tags_to_unset} from {resource_id}") return True @@ -214,9 +208,9 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod :param resource_type: the type of the resource :param retry_codes: additional boto3 error codes to trigger retries """ - filters = {'resource-id': resource_id} + filters = {"resource-id": resource_id} if 
resource_type: - filters['resource-type'] = resource_type + filters["resource-type"] = resource_type filters = ansible_dict_to_boto3_filter_list(filters) if not retry_codes: @@ -224,11 +218,12 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod try: retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes) - results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator, - Filters=filters) - return boto3_tag_list_to_ansible_dict(results.get('Tags', None)) + results = paginated_query_with_retries( + client, "describe_tags", retry_decorator=retry_decorator, Filters=filters + ) + return boto3_tag_list_to_ansible_dict(results.get("Tags", None)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id)) + module.fail_json_aws(e, msg=f"Failed to describe tags for EC2 Resource: {resource_id}") def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None): @@ -297,14 +292,23 @@ def normalize_ec2_vpc_dhcp_config(option_config): for config_item in option_config: # Handle single value keys - if config_item['Key'] == 'netbios-node-type': - if isinstance(config_item['Values'], integer_types): - config_data['netbios-node-type'] = str((config_item['Values'])) - elif isinstance(config_item['Values'], list): - config_data['netbios-node-type'] = str((config_item['Values'][0]['Value'])) + if config_item["Key"] == "netbios-node-type": + if isinstance(config_item["Values"], integer_types): + config_data["netbios-node-type"] = str((config_item["Values"])) + elif isinstance(config_item["Values"], list): + config_data["netbios-node-type"] = str((config_item["Values"][0]["Value"])) # Handle actual lists of values - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']: - if config_item['Key'] == option: - config_data[option] = [val['Value'] for val in config_item['Values']] + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers"]: + if config_item["Key"] == option: + config_data[option] = [val["Value"] for val in config_item["Values"]] return config_data + + +@AWSRetry.jittered_backoff(retries=10) +def helper_describe_import_image_tasks(client, module, **params): + try: + paginator = client.get_paginator("describe_import_image_tasks") + return paginator.paginate(**params).build_full_result()["ImportImageTasks"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe the import image") diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py index 218052d2f..8dc5eabfe 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py @@ -1,16 +1,16 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass -from .core import is_boto3_error_code -from .ec2 
import AWSRetry +from .botocore import is_boto3_error_code +from .retries import AWSRetry def get_elb(connection, module, elb_name): @@ -40,9 +40,9 @@ def _get_elb(connection, module, elb_name): """ try: - load_balancer_paginator = connection.get_paginator('describe_load_balancers') - return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0] - except is_boto3_error_code('LoadBalancerNotFound'): + load_balancer_paginator = connection.get_paginator("describe_load_balancers") + return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())["LoadBalancers"][0] + except is_boto3_error_code("LoadBalancerNotFound"): return None @@ -58,15 +58,17 @@ def get_elb_listener(connection, module, elb_arn, listener_port): """ try: - listener_paginator = connection.get_paginator('describe_listeners') - listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners'] + listener_paginator = connection.get_paginator("describe_listeners") + listeners = ( + AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result() + )["Listeners"] except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) l = None for listener in listeners: - if listener['Port'] == listener_port: + if listener["Port"] == listener_port: l = listener break @@ -84,7 +86,7 @@ def get_elb_listener_rules(connection, module, listener_arn): """ try: - return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules'] + return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)["Rules"] except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) @@ -104,6 +106,6 @@ def convert_tg_name_to_arn(connection, module, tg_name): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) - tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + tg_arn = response["TargetGroups"][0]["TargetGroupArn"] return tg_arn diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py index 04f6114e1..758eb9a33 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py @@ -1,36 +1,36 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import traceback from copy import deepcopy try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict from .ec2 import get_ec2_security_group_ids_from_names from .elb_utils import convert_tg_name_to_arn from .elb_utils import get_elb from .elb_utils import get_elb_listener +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict from .waiters import get_waiter def _simple_forward_config_arn(config, parent_arn): config = deepcopy(config) - stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False}) + stickiness = config.pop("TargetGroupStickinessConfig", {"Enabled": False}) # Stickiness options set, non default 
value - if stickiness != {'Enabled': False}: + if stickiness != {"Enabled": False}: return False - target_groups = config.pop('TargetGroups', []) + target_groups = config.pop("TargetGroups", []) # non-default config left over, probably invalid if config: @@ -45,9 +45,9 @@ def _simple_forward_config_arn(config, parent_arn): target_group = target_groups[0] # We don't care about the weight with a single TG - target_group.pop('Weight', None) + target_group.pop("Weight", None) - target_group_arn = target_group.pop('TargetGroupArn', None) + target_group_arn = target_group.pop("TargetGroupArn", None) # non-default config left over if target_group: @@ -75,12 +75,12 @@ def _prune_ForwardConfig(action): Drops a redundant ForwardConfig where TargetGroupARN has already been set. (So we can perform comparisons) """ - if action.get('Type', "") != 'forward': + if action.get("Type", "") != "forward": return action if "ForwardConfig" not in action: return action - parent_arn = action.get('TargetGroupArn', None) + parent_arn = action.get("TargetGroupArn", None) arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn) if not arn: return action @@ -95,17 +95,23 @@ def _prune_ForwardConfig(action): # remove the client secret if UseExistingClientSecret, because aws won't return it # add default values when they are not requested def _prune_secret(action): - if action['Type'] != 'authenticate-oidc': + if action["Type"] != "authenticate-oidc": return action - if not action['AuthenticateOidcConfig'].get('Scope', False): - action['AuthenticateOidcConfig']['Scope'] = 'openid' + if not action["AuthenticateOidcConfig"].get("Scope", False): + action["AuthenticateOidcConfig"]["Scope"] = "openid" + + if not action["AuthenticateOidcConfig"].get("SessionTimeout", False): + action["AuthenticateOidcConfig"]["SessionTimeout"] = 604800 - if not action['AuthenticateOidcConfig'].get('SessionTimeout', False): - action['AuthenticateOidcConfig']['SessionTimeout'] = 604800 + if action["AuthenticateOidcConfig"].get("UseExistingClientSecret", False): + action["AuthenticateOidcConfig"].pop("ClientSecret", None) - if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False): - action['AuthenticateOidcConfig'].pop('ClientSecret', None) + if not action["AuthenticateOidcConfig"].get("OnUnauthenticatedRequest", False): + action["AuthenticateOidcConfig"]["OnUnauthenticatedRequest"] = "authenticate" + + if not action["AuthenticateOidcConfig"].get("SessionCookieName", False): + action["AuthenticateOidcConfig"]["SessionCookieName"] = "AWSELBAuthSessionCookie" return action @@ -113,22 +119,20 @@ def _prune_secret(action): # while AWS api also won't return UseExistingClientSecret key # it must be added, because it's requested and compared def _append_use_existing_client_secretn(action): - if action['Type'] != 'authenticate-oidc': + if action["Type"] != "authenticate-oidc": return action - action['AuthenticateOidcConfig']['UseExistingClientSecret'] = True + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = True return action def _sort_actions(actions): - return sorted(actions, key=lambda x: x.get('Order', 0)) - + return sorted(actions, key=lambda x: x.get("Order", 0)) -class ElasticLoadBalancerV2(object): +class ElasticLoadBalancerV2: def __init__(self, connection, module): - self.connection = connection self.module = module self.changed = False @@ -152,7 +156,7 @@ class ElasticLoadBalancerV2(object): if self.elb is not None: self.elb_attributes = self.get_elb_attributes() self.elb_ip_addr_type = 
self.get_elb_ip_address_type() - self.elb['tags'] = self.get_elb_tags() + self.elb["tags"] = self.get_elb_tags() else: self.elb_attributes = None @@ -168,8 +172,8 @@ class ElasticLoadBalancerV2(object): return waiter_names = { - 'ipv4': 'load_balancer_ip_address_type_ipv4', - 'dualstack': 'load_balancer_ip_address_type_dualstack', + "ipv4": "load_balancer_ip_address_type_ipv4", + "dualstack": "load_balancer_ip_address_type_dualstack", } if ip_type not in waiter_names: return @@ -192,7 +196,7 @@ class ElasticLoadBalancerV2(object): return try: - waiter = get_waiter(self.connection, 'load_balancer_available') + waiter = get_waiter(self.connection, "load_balancer_available") waiter.wait(LoadBalancerArns=[elb_arn]) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -209,7 +213,7 @@ class ElasticLoadBalancerV2(object): return try: - waiter = get_waiter(self.connection, 'load_balancers_deleted') + waiter = get_waiter(self.connection, "load_balancers_deleted") waiter.wait(LoadBalancerArns=[elb_arn]) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -222,16 +226,16 @@ class ElasticLoadBalancerV2(object): """ try: - attr_list = AWSRetry.jittered_backoff()( - self.connection.describe_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes'] + attr_list = AWSRetry.jittered_backoff()(self.connection.describe_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + )["Attributes"] elb_attributes = boto3_tag_list_to_ansible_dict(attr_list) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) # Replace '.' with '_' in attribute key names to make it more Ansibley - return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items()) + return dict((k.replace(".", "_"), v) for k, v in elb_attributes.items()) def get_elb_ip_address_type(self): """ @@ -240,7 +244,7 @@ class ElasticLoadBalancerV2(object): :return: """ - return self.elb.get('IpAddressType', None) + return self.elb.get("IpAddressType", None) def update_elb_attributes(self): """ @@ -257,9 +261,9 @@ class ElasticLoadBalancerV2(object): """ try: - return AWSRetry.jittered_backoff()( - self.connection.describe_tags - )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags'] + return AWSRetry.jittered_backoff()(self.connection.describe_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]] + )["TagDescriptions"][0]["Tags"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -271,9 +275,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.remove_tags - )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete) + AWSRetry.jittered_backoff()(self.connection.remove_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]], TagKeys=tags_to_delete + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -287,9 +291,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.add_tags - )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags) + AWSRetry.jittered_backoff()(self.connection.add_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]], Tags=self.tags + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -302,13 +306,13 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.delete_load_balancer - )(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + 
LoadBalancerArn=self.elb["LoadBalancerArn"] + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) - self.wait_for_deletion(self.elb['LoadBalancerArn']) + self.wait_for_deletion(self.elb["LoadBalancerArn"]) self.changed = True @@ -326,7 +330,7 @@ class ElasticLoadBalancerV2(object): if self.subnets is not None: # Convert subnets to subnet_mappings format for comparison for subnet in self.subnets: - subnet_mappings.append({'SubnetId': subnet}) + subnet_mappings.append({"SubnetId": subnet}) if self.subnet_mappings is not None: # Use this directly since we're comparing as a mapping @@ -334,16 +338,18 @@ class ElasticLoadBalancerV2(object): # Build a subnet_mapping style struture of what's currently # on the load balancer - for subnet in self.elb['AvailabilityZones']: - this_mapping = {'SubnetId': subnet['SubnetId']} - for address in subnet.get('LoadBalancerAddresses', []): - if 'AllocationId' in address: - this_mapping['AllocationId'] = address['AllocationId'] + for subnet in self.elb["AvailabilityZones"]: + this_mapping = {"SubnetId": subnet["SubnetId"]} + for address in subnet.get("LoadBalancerAddresses", []): + if "AllocationId" in address: + this_mapping["AllocationId"] = address["AllocationId"] break subnet_mapping_id_list.append(this_mapping) - return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings) + return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set( + frozenset(mapping.items()) for mapping in subnet_mappings + ) def modify_subnets(self): """ @@ -352,9 +358,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.set_subnets - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets) + AWSRetry.jittered_backoff()(self.connection.set_subnets)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Subnets=self.subnets + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -367,7 +373,7 @@ class ElasticLoadBalancerV2(object): """ self.elb = get_elb(self.connection, self.module, self.module.params.get("name")) - self.elb['tags'] = self.get_elb_tags() + self.elb["tags"] = self.get_elb_tags() def modify_ip_address_type(self, ip_addr_type): """ @@ -380,30 +386,30 @@ class ElasticLoadBalancerV2(object): return try: - AWSRetry.jittered_backoff()( - self.connection.set_ip_address_type - )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type) + AWSRetry.jittered_backoff()(self.connection.set_ip_address_type)( + LoadBalancerArn=self.elb["LoadBalancerArn"], IpAddressType=ip_addr_type + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) self.changed = True - self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type) + self.wait_for_ip_type(self.elb["LoadBalancerArn"], ip_addr_type) def _elb_create_params(self): # Required parameters params = dict() - params['Name'] = self.name - params['Type'] = self.type + params["Name"] = self.name + params["Type"] = self.type # Other parameters if self.elb_ip_addr_type is not None: - params['IpAddressType'] = self.elb_ip_addr_type + params["IpAddressType"] = self.elb_ip_addr_type if self.subnets is not None: - params['Subnets'] = self.subnets + params["Subnets"] = self.subnets if self.subnet_mappings is not None: - params['SubnetMappings'] = self.subnet_mappings + params["SubnetMappings"] = self.subnet_mappings if self.tags: - params['Tags'] = self.tags + params["Tags"] = self.tags # Scheme isn't supported for 
GatewayLBs, so we won't add it here, even though we don't # support them yet. @@ -418,40 +424,39 @@ class ElasticLoadBalancerV2(object): params = self._elb_create_params() try: - self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0] + self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)["LoadBalancers"][0] self.changed = True self.new_load_balancer = True except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) - self.wait_for_status(self.elb['LoadBalancerArn']) + self.wait_for_status(self.elb["LoadBalancerArn"]) class ApplicationLoadBalancer(ElasticLoadBalancerV2): - def __init__(self, connection, connection_ec2, module): """ :param connection: boto3 connection :param module: Ansible module """ - super(ApplicationLoadBalancer, self).__init__(connection, module) + super().__init__(connection, module) self.connection_ec2 = connection_ec2 # Ansible module parameters specific to ALBs - self.type = 'application' - if module.params.get('security_groups') is not None: + self.type = "application" + if module.params.get("security_groups") is not None: try: - self.security_groups = AWSRetry.jittered_backoff()( - get_ec2_security_group_ids_from_names - )(module.params.get('security_groups'), self.connection_ec2, boto3=True) + self.security_groups = AWSRetry.jittered_backoff()(get_ec2_security_group_ids_from_names)( + module.params.get("security_groups"), self.connection_ec2, boto3=True + ) except ValueError as e: self.module.fail_json(msg=str(e), exception=traceback.format_exc()) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) else: - self.security_groups = module.params.get('security_groups') + self.security_groups = module.params.get("security_groups") self.access_logs_enabled = module.params.get("access_logs_enabled") self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket") self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix") @@ -463,15 +468,17 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): self.http_xff_client_port = module.params.get("http_xff_client_port") self.waf_fail_open = module.params.get("waf_fail_open") - if self.elb is not None and self.elb['Type'] != 'application': - self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.") + if self.elb is not None and self.elb["Type"] != "application": + self.module.fail_json( + msg="The load balancer type you are trying to manage is not application. 
Try elb_network_lb module instead.", + ) def _elb_create_params(self): params = super()._elb_create_params() if self.security_groups is not None: - params['SecurityGroups'] = self.security_groups - params['Scheme'] = self.scheme + params["SecurityGroups"] = self.security_groups + params["Scheme"] = self.scheme return params @@ -482,34 +489,77 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): """ update_attributes = [] - if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: - update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) - if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: - update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) - if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: - update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) - if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: - update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) - if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: - update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) - if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ - self.elb_attributes['routing_http_desync_mitigation_mode']: - update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) - if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ - self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: - update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) - if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ - self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: - update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', - 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) - if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ - self.elb_attributes['routing_http_xff_client_port_enabled']: - update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) - if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ - self.elb_attributes['waf_fail_open_enabled']: - update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + if ( + self.access_logs_enabled is not None + and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"] + ): + update_attributes.append({"Key": "access_logs.s3.enabled", "Value": 
str(self.access_logs_enabled).lower()}) + if ( + self.access_logs_s3_bucket is not None + and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"] + ): + update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket}) + if ( + self.access_logs_s3_prefix is not None + and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"] + ): + update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix}) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) + if ( + self.idle_timeout is not None + and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"] + ): + update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)}) + if self.http2 is not None and str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]: + update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()}) + if ( + self.http_desync_mitigation_mode is not None + and str(self.http_desync_mitigation_mode).lower() + != self.elb_attributes["routing_http_desync_mitigation_mode"] + ): + update_attributes.append( + {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()} + ) + if ( + self.http_drop_invalid_header_fields is not None + and str(self.http_drop_invalid_header_fields).lower() + != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.drop_invalid_header_fields.enabled", + "Value": str(self.http_drop_invalid_header_fields).lower(), + } + ) + if ( + self.http_x_amzn_tls_version_and_cipher_suite is not None + and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() + != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled", + "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(), + } + ) + if ( + self.http_xff_client_port is not None + and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"] + ): + update_attributes.append( + {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()} + ) + if ( + self.waf_fail_open is not None + and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"] + ): + update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()}) if update_attributes: return False @@ -525,45 +575,90 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): update_attributes = [] - if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: - update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) - if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: - update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) - if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: - 
update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) - if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: - update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) - if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: - update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) - if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ - self.elb_attributes['routing_http_desync_mitigation_mode']: - update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) - if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ - self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: - update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) - if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ - self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: - update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', - 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) - if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ - self.elb_attributes['routing_http_xff_client_port_enabled']: - update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) - if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ - self.elb_attributes['waf_fail_open_enabled']: - update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + if ( + self.access_logs_enabled is not None + and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"] + ): + update_attributes.append({"Key": "access_logs.s3.enabled", "Value": str(self.access_logs_enabled).lower()}) + if ( + self.access_logs_s3_bucket is not None + and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"] + ): + update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket}) + if ( + self.access_logs_s3_prefix is not None + and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"] + ): + update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix}) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) + if ( + self.idle_timeout is not None + and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"] + ): + update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)}) + if self.http2 is not None and 
str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]: + update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()}) + if ( + self.http_desync_mitigation_mode is not None + and str(self.http_desync_mitigation_mode).lower() + != self.elb_attributes["routing_http_desync_mitigation_mode"] + ): + update_attributes.append( + {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()} + ) + if ( + self.http_drop_invalid_header_fields is not None + and str(self.http_drop_invalid_header_fields).lower() + != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.drop_invalid_header_fields.enabled", + "Value": str(self.http_drop_invalid_header_fields).lower(), + } + ) + if ( + self.http_x_amzn_tls_version_and_cipher_suite is not None + and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() + != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled", + "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(), + } + ) + if ( + self.http_xff_client_port is not None + and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"] + ): + update_attributes.append( + {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()} + ) + if ( + self.waf_fail_open is not None + and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"] + ): + update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()}) if update_attributes: try: - AWSRetry.jittered_backoff()( - self.connection.modify_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes + ) self.changed = True except (BotoCoreError, ClientError) as e: # Something went wrong setting attributes. 
If this ELB was created during this task, delete it to leave a consistent state if self.new_load_balancer: - AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + ) self.module.fail_json_aws(e) def compare_security_groups(self): @@ -573,7 +668,7 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): :return: bool True if they match otherwise False """ - if set(self.elb['SecurityGroups']) != set(self.security_groups): + if set(self.elb["SecurityGroups"]) != set(self.security_groups): return False else: return True @@ -585,9 +680,9 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): """ try: - AWSRetry.jittered_backoff()( - self.connection.set_security_groups - )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups) + AWSRetry.jittered_backoff()(self.connection.set_security_groups)( + LoadBalancerArn=self.elb["LoadBalancerArn"], SecurityGroups=self.security_groups + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -595,29 +690,29 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): class NetworkLoadBalancer(ElasticLoadBalancerV2): - def __init__(self, connection, connection_ec2, module): - """ :param connection: boto3 connection :param module: Ansible module """ - super(NetworkLoadBalancer, self).__init__(connection, module) + super().__init__(connection, module) self.connection_ec2 = connection_ec2 # Ansible module parameters specific to NLBs - self.type = 'network' - self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing') + self.type = "network" + self.cross_zone_load_balancing = module.params.get("cross_zone_load_balancing") - if self.elb is not None and self.elb['Type'] != 'network': - self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.") + if self.elb is not None and self.elb["Type"] != "network": + self.module.fail_json( + msg="The load balancer type you are trying to manage is not network. 
Try elb_application_lb module instead.", + ) def _elb_create_params(self): params = super()._elb_create_params() - params['Scheme'] = self.scheme + params["Scheme"] = self.scheme return params @@ -630,22 +725,33 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2): update_attributes = [] - if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \ - self.elb_attributes['load_balancing_cross_zone_enabled']: - update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) + if ( + self.cross_zone_load_balancing is not None + and str(self.cross_zone_load_balancing).lower() != self.elb_attributes["load_balancing_cross_zone_enabled"] + ): + update_attributes.append( + {"Key": "load_balancing.cross_zone.enabled", "Value": str(self.cross_zone_load_balancing).lower()} + ) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) if update_attributes: try: - AWSRetry.jittered_backoff()( - self.connection.modify_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes + ) self.changed = True except (BotoCoreError, ClientError) as e: # Something went wrong setting attributes. 
If this ELB was created during this task, delete it to leave a consistent state if self.new_load_balancer: - AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + ) self.module.fail_json_aws(e) def modify_subnets(self): @@ -654,20 +760,21 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2): :return: """ - self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer') + self.module.fail_json(msg="Modifying subnets and elastic IPs is not supported for Network Load Balancer") -class ELBListeners(object): - +class ELBListeners: def __init__(self, connection, module, elb_arn): - self.connection = connection self.module = module self.elb_arn = elb_arn listeners = module.params.get("listeners") if listeners is not None: # Remove suboption argspec defaults of None from each listener - listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners] + listeners = [ + dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) + for listener_dict in listeners + ] self.listeners = self._ensure_listeners_default_action_has_arn(listeners) self.current_listeners = self._get_elb_listeners() self.purge_listeners = module.params.get("purge_listeners") @@ -689,8 +796,12 @@ class ELBListeners(object): """ try: - listener_paginator = self.connection.get_paginator('describe_listeners') - return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners'] + listener_paginator = self.connection.get_paginator("describe_listeners") + return ( + AWSRetry.jittered_backoff()(listener_paginator.paginate)( + LoadBalancerArn=self.elb_arn + ).build_full_result() + )["Listeners"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -709,14 +820,14 @@ class ELBListeners(object): fixed_listeners = [] for listener in listeners: fixed_actions = [] - for action in listener['DefaultActions']: - if 'TargetGroupName' in action: - action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, - self.module, - action['TargetGroupName']) - del action['TargetGroupName'] + for action in listener["DefaultActions"]: + if "TargetGroupName" in action: + action["TargetGroupArn"] = convert_tg_name_to_arn( + self.connection, self.module, action["TargetGroupName"] + ) + del action["TargetGroupName"] fixed_actions.append(action) - listener['DefaultActions'] = fixed_actions + listener["DefaultActions"] = fixed_actions fixed_listeners.append(listener) return fixed_listeners @@ -734,21 +845,21 @@ class ELBListeners(object): for current_listener in self.current_listeners: current_listener_passed_to_module = False for new_listener in self.listeners[:]: - new_listener['Port'] = int(new_listener['Port']) - if current_listener['Port'] == new_listener['Port']: + new_listener["Port"] = int(new_listener["Port"]) + if current_listener["Port"] == new_listener["Port"]: current_listener_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' listeners_to_add.remove(new_listener) modified_listener = self._compare_listener(current_listener, new_listener) if modified_listener: - modified_listener['Port'] = current_listener['Port'] - modified_listener['ListenerArn'] = current_listener['ListenerArn'] + modified_listener["Port"] = 
current_listener["Port"] + modified_listener["ListenerArn"] = current_listener["ListenerArn"] listeners_to_modify.append(modified_listener) break # If the current listener was not matched against passed listeners and purge is True, mark for removal if not current_listener_passed_to_module and self.purge_listeners: - listeners_to_delete.append(current_listener['ListenerArn']) + listeners_to_delete.append(current_listener["ListenerArn"]) return listeners_to_add, listeners_to_modify, listeners_to_delete @@ -764,43 +875,50 @@ class ELBListeners(object): modified_listener = {} # Port - if current_listener['Port'] != new_listener['Port']: - modified_listener['Port'] = new_listener['Port'] + if current_listener["Port"] != new_listener["Port"]: + modified_listener["Port"] = new_listener["Port"] # Protocol - if current_listener['Protocol'] != new_listener['Protocol']: - modified_listener['Protocol'] = new_listener['Protocol'] + if current_listener["Protocol"] != new_listener["Protocol"]: + modified_listener["Protocol"] = new_listener["Protocol"] # If Protocol is HTTPS, check additional attributes - if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS': + if current_listener["Protocol"] == "HTTPS" and new_listener["Protocol"] == "HTTPS": # Cert - if current_listener['SslPolicy'] != new_listener['SslPolicy']: - modified_listener['SslPolicy'] = new_listener['SslPolicy'] - if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']: - modified_listener['Certificates'] = [] - modified_listener['Certificates'].append({}) - modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] - elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS': - modified_listener['SslPolicy'] = new_listener['SslPolicy'] - modified_listener['Certificates'] = [] - modified_listener['Certificates'].append({}) - modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] + if current_listener["SslPolicy"] != new_listener["SslPolicy"]: + modified_listener["SslPolicy"] = new_listener["SslPolicy"] + if ( + current_listener["Certificates"][0]["CertificateArn"] + != new_listener["Certificates"][0]["CertificateArn"] + ): + modified_listener["Certificates"] = [] + modified_listener["Certificates"].append({}) + modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0][ + "CertificateArn" + ] + elif current_listener["Protocol"] != "HTTPS" and new_listener["Protocol"] == "HTTPS": + modified_listener["SslPolicy"] = new_listener["SslPolicy"] + modified_listener["Certificates"] = [] + modified_listener["Certificates"].append({}) + modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0]["CertificateArn"] # Default action # If the lengths of the actions are the same, we'll have to verify that the # contents of those actions are the same - if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']): - current_actions_sorted = _sort_actions(current_listener['DefaultActions']) - new_actions_sorted = _sort_actions(new_listener['DefaultActions']) + if len(current_listener["DefaultActions"]) == len(new_listener["DefaultActions"]): + current_actions_sorted = _sort_actions(current_listener["DefaultActions"]) + new_actions_sorted = _sort_actions(new_listener["DefaultActions"]) new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted] - if 
[_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]: - modified_listener['DefaultActions'] = new_listener['DefaultActions'] + if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [ + _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret + ]: + modified_listener["DefaultActions"] = new_listener["DefaultActions"] # If the action lengths are different, then replace with the new actions else: - modified_listener['DefaultActions'] = new_listener['DefaultActions'] + modified_listener["DefaultActions"] = new_listener["DefaultActions"] if modified_listener: return modified_listener @@ -808,8 +926,7 @@ class ELBListeners(object): return None -class ELBListener(object): - +class ELBListener: def __init__(self, connection, module, listener, elb_arn): """ @@ -825,37 +942,32 @@ class ELBListener(object): self.elb_arn = elb_arn def add(self): - try: # Rules is not a valid parameter for create_listener - if 'Rules' in self.listener: - self.listener.pop('Rules') + if "Rules" in self.listener: + self.listener.pop("Rules") AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) def modify(self): - try: # Rules is not a valid parameter for modify_listener - if 'Rules' in self.listener: - self.listener.pop('Rules') + if "Rules" in self.listener: + self.listener.pop("Rules") AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) def delete(self): - try: AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) -class ELBListenerRules(object): - +class ELBListenerRules: def __init__(self, connection, module, elb_arn, listener_rules, listener_port): - self.connection = connection self.module = module self.elb_arn = elb_arn @@ -864,13 +976,10 @@ class ELBListenerRules(object): # Get listener based on port so we can use ARN self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port) - self.listener_arn = self.current_listener['ListenerArn'] - self.rules_to_add = deepcopy(self.rules) - self.rules_to_modify = [] - self.rules_to_delete = [] + self.listener_arn = self.current_listener.get("ListenerArn") # If the listener exists (i.e. 
has an ARN) get rules for the listener - if 'ListenerArn' in self.current_listener: + if "ListenerArn" in self.current_listener: self.current_rules = self._get_elb_listener_rules() else: self.current_rules = [] @@ -887,20 +996,23 @@ class ELBListenerRules(object): fixed_rules = [] for rule in rules: fixed_actions = [] - for action in rule['Actions']: - if 'TargetGroupName' in action: - action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName']) - del action['TargetGroupName'] + for action in rule["Actions"]: + if "TargetGroupName" in action: + action["TargetGroupArn"] = convert_tg_name_to_arn( + self.connection, self.module, action["TargetGroupName"] + ) + del action["TargetGroupName"] fixed_actions.append(action) - rule['Actions'] = fixed_actions + rule["Actions"] = fixed_actions fixed_rules.append(rule) return fixed_rules def _get_elb_listener_rules(self): - try: - return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules'] + return AWSRetry.jittered_backoff()(self.connection.describe_rules)( + ListenerArn=self.current_listener["ListenerArn"] + )["Rules"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -918,44 +1030,56 @@ class ELBListenerRules(object): # host-header: current_condition includes both HostHeaderConfig AND Values while # condition can be defined with either HostHeaderConfig OR Values. Only use # HostHeaderConfig['Values'] comparison if both conditions includes HostHeaderConfig. - if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])): + if current_condition.get("HostHeaderConfig") and condition.get("HostHeaderConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["HostHeaderConfig"]["Values"] + ) == sorted(condition["HostHeaderConfig"]["Values"]): condition_found = True break - elif current_condition.get('HttpHeaderConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and - current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']): + elif current_condition.get("HttpHeaderConfig"): + if ( + current_condition["Field"] == condition["Field"] + and sorted(current_condition["HttpHeaderConfig"]["Values"]) + == sorted(condition["HttpHeaderConfig"]["Values"]) + and current_condition["HttpHeaderConfig"]["HttpHeaderName"] + == condition["HttpHeaderConfig"]["HttpHeaderName"] + ): condition_found = True break - elif current_condition.get('HttpRequestMethodConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])): + elif current_condition.get("HttpRequestMethodConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["HttpRequestMethodConfig"]["Values"] + ) == sorted(condition["HttpRequestMethodConfig"]["Values"]): condition_found = True break # path-pattern: current_condition includes both PathPatternConfig AND Values while # condition can be defined with either PathPatternConfig OR Values. 
Only use # PathPatternConfig['Values'] comparison if both conditions includes PathPatternConfig. - elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])): + elif current_condition.get("PathPatternConfig") and condition.get("PathPatternConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["PathPatternConfig"]["Values"] + ) == sorted(condition["PathPatternConfig"]["Values"]): condition_found = True break - elif current_condition.get('QueryStringConfig'): + elif current_condition.get("QueryStringConfig"): # QueryString Values is not sorted as it is the only list of dicts (not strings). - if (current_condition['Field'] == condition['Field'] and - current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']): + if ( + current_condition["Field"] == condition["Field"] + and current_condition["QueryStringConfig"]["Values"] == condition["QueryStringConfig"]["Values"] + ): condition_found = True break - elif current_condition.get('SourceIpConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])): + elif current_condition.get("SourceIpConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["SourceIpConfig"]["Values"] + ) == sorted(condition["SourceIpConfig"]["Values"]): condition_found = True break # Not all fields are required to have Values list nested within a *Config dict # e.g. fields host-header/path-pattern can directly list Values - elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']): + elif current_condition["Field"] == condition["Field"] and sorted(current_condition["Values"]) == sorted( + condition["Values"] + ): condition_found = True break @@ -970,36 +1094,39 @@ class ELBListenerRules(object): modified_rule = {} # Priority - if int(current_rule['Priority']) != int(new_rule['Priority']): - modified_rule['Priority'] = new_rule['Priority'] + if int(current_rule["Priority"]) != int(new_rule["Priority"]): + modified_rule["Priority"] = new_rule["Priority"] # Actions # If the lengths of the actions are the same, we'll have to verify that the # contents of those actions are the same - if len(current_rule['Actions']) == len(new_rule['Actions']): + if len(current_rule["Actions"]) == len(new_rule["Actions"]): # if actions have just one element, compare the contents and then update if # they're different - current_actions_sorted = _sort_actions(current_rule['Actions']) - new_actions_sorted = _sort_actions(new_rule['Actions']) + copy_new_rule = deepcopy(new_rule) + current_actions_sorted = _sort_actions(current_rule["Actions"]) + new_actions_sorted = _sort_actions(copy_new_rule["Actions"]) new_current_actions_sorted = [_append_use_existing_client_secretn(i) for i in current_actions_sorted] new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted] - if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]: - modified_rule['Actions'] = new_rule['Actions'] + if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [ + _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret + ]: + 
modified_rule["Actions"] = new_rule["Actions"] # If the action lengths are different, then replace with the new actions else: - modified_rule['Actions'] = new_rule['Actions'] + modified_rule["Actions"] = new_rule["Actions"] # Conditions modified_conditions = [] - for condition in new_rule['Conditions']: - if not self._compare_condition(current_rule['Conditions'], condition): + for condition in new_rule["Conditions"]: + if not self._compare_condition(current_rule["Conditions"], condition): modified_conditions.append(condition) if modified_conditions: - modified_rule['Conditions'] = modified_conditions + modified_rule["Conditions"] = modified_conditions return modified_rule @@ -1012,34 +1139,73 @@ class ELBListenerRules(object): rules_to_modify = [] rules_to_delete = [] rules_to_add = deepcopy(self.rules) + rules_to_set_priority = [] + + # List rules to update priority, 'Actions' and 'Conditions' remain the same + # only the 'Priority' has changed + current_rules = deepcopy(self.current_rules) + remaining_rules = [] + while current_rules: + current_rule = current_rules.pop(0) + # Skip the default rule, this one can't be modified + if current_rule.get("IsDefault", False): + continue + to_keep = True + for new_rule in rules_to_add: + modified_rule = self._compare_rule(current_rule, new_rule) + if not modified_rule: + # The current rule has been passed with the same properties to the module + # Remove it for later comparison + rules_to_add.remove(new_rule) + to_keep = False + break + if modified_rule and list(modified_rule.keys()) == ["Priority"]: + # if only the Priority has changed + modified_rule["Priority"] = int(new_rule["Priority"]) + modified_rule["RuleArn"] = current_rule["RuleArn"] + + rules_to_set_priority.append(modified_rule) + to_keep = False + rules_to_add.remove(new_rule) + break + if to_keep: + remaining_rules.append(current_rule) - for current_rule in self.current_rules: + for current_rule in remaining_rules: current_rule_passed_to_module = False - for new_rule in self.rules[:]: - if current_rule['Priority'] == str(new_rule['Priority']): + for new_rule in rules_to_add: + if current_rule["Priority"] == str(new_rule["Priority"]): current_rule_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' rules_to_add.remove(new_rule) modified_rule = self._compare_rule(current_rule, new_rule) if modified_rule: - modified_rule['Priority'] = int(current_rule['Priority']) - modified_rule['RuleArn'] = current_rule['RuleArn'] - modified_rule['Actions'] = new_rule['Actions'] - modified_rule['Conditions'] = new_rule['Conditions'] + modified_rule["Priority"] = int(current_rule["Priority"]) + modified_rule["RuleArn"] = current_rule["RuleArn"] + modified_rule["Actions"] = new_rule["Actions"] + modified_rule["Conditions"] = new_rule["Conditions"] + # You cannot both specify a client secret and set UseExistingClientSecret to true + for action in modified_rule.get("Actions", []): + if action.get("AuthenticateOidcConfig", {}).get("ClientSecret", False): + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False rules_to_modify.append(modified_rule) break # If the current rule was not matched against passed rules, mark for removal - if not current_rule_passed_to_module and not current_rule['IsDefault']: - rules_to_delete.append(current_rule['RuleArn']) + if not current_rule_passed_to_module and not current_rule.get("IsDefault", False): + rules_to_delete.append(current_rule["RuleArn"]) - return rules_to_add, rules_to_modify, rules_to_delete + # For 
rules to create 'UseExistingClientSecret' should be set to False + for rule in rules_to_add: + for action in rule.get("Actions", []): + if action.get("AuthenticateOidcConfig", {}).get("UseExistingClientSecret", False): + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False + return rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority -class ELBListenerRule(object): +class ELBListenerRule: def __init__(self, connection, module, rule, listener_arn): - self.connection = connection self.module = module self.rule = rule @@ -1054,8 +1220,8 @@ class ELBListenerRule(object): """ try: - self.rule['ListenerArn'] = self.listener_arn - self.rule['Priority'] = int(self.rule['Priority']) + self.rule["ListenerArn"] = self.listener_arn + self.rule["Priority"] = int(self.rule["Priority"]) AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -1070,7 +1236,7 @@ class ELBListenerRule(object): """ try: - del self.rule['Priority'] + del self.rule["Priority"] AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -1085,7 +1251,25 @@ class ELBListenerRule(object): """ try: - AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn']) + AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule["RuleArn"]) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.changed = True + + def set_rule_priorities(self): + """ + Sets the priorities of the specified rules. + + :return: + """ + + try: + rules = [self.rule] + if isinstance(self.rule, list): + rules = self.rule + rule_priorities = [{"RuleArn": rule["RuleArn"], "Priority": rule["Priority"]} for rule in rules] + AWSRetry.jittered_backoff()(self.connection.set_rule_priorities)(RulePriorities=rule_priorities) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/errors.py b/ansible_collections/amazon/aws/plugins/module_utils/errors.py new file mode 100644 index 000000000..38e9b3800 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/errors.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import functools + +try: + import botocore +except ImportError: + pass # Modules are responsible for handling this. + +from .exceptions import AnsibleAWSError + + +class AWSErrorHandler: + + """_CUSTOM_EXCEPTION can be overridden by subclasses to customize the exception raised""" + + _CUSTOM_EXCEPTION = AnsibleAWSError + + @classmethod + def _is_missing(cls): + """Should be overridden with a class method that returns the value from is_boto3_error_code (or similar)""" + return type("NeverEverRaisedException", (Exception,), {}) + + @classmethod + def common_error_handler(cls, description): + """A simple error handler that catches the standard Boto3 exceptions and raises + an AnsibleAWSError exception. + + param: description: a description of the action being taken. 
+ Exception raised will include a message of + f"Timeout trying to {description}" or + f"Failed to {description}" + """ + + def wrapper(func): + @functools.wraps(func) + def handler(*args, **kwargs): + try: + return func(*args, **kwargs) + except botocore.exceptions.WaiterError as e: + raise cls._CUSTOM_EXCEPTION(message=f"Timeout trying to {description}", exception=e) from e + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise cls._CUSTOM_EXCEPTION(message=f"Failed to {description}", exception=e) from e + + return handler + + return wrapper + + @classmethod + def list_error_handler(cls, description, default_value=None): + """A simple error handler that catches the standard Boto3 exceptions and raises + an AnsibleAWSError exception. + Error codes representing a non-existent entity will result in None being returned + Generally used for Get/List calls where the exception just means the resource isn't there + + param: description: a description of the action being taken. + Exception raised will include a message of + f"Timeout trying to {description}" or + f"Failed to {description}" + param: default_value: the value to return if no matching + resources are returned. Defaults to None + """ + + def wrapper(func): + @functools.wraps(func) + @cls.common_error_handler(description) + def handler(*args, **kwargs): + try: + return func(*args, **kwargs) + except cls._is_missing(): + return default_value + + return handler + + return wrapper + + @classmethod + def deletion_error_handler(cls, description): + """A simple error handler that catches the standard Boto3 exceptions and raises + an AnsibleAWSError exception. + Error codes representing a non-existent entity will result in None being returned + Generally used in deletion calls where NoSuchEntity means it's already gone + + param: description: a description of the action being taken. + Exception raised will include a message of + f"Timeout trying to {description}" or + f"Failed to {description}" + """ + + def wrapper(func): + @functools.wraps(func) + @cls.common_error_handler(description) + def handler(*args, **kwargs): + try: + return func(*args, **kwargs) + except cls._is_missing(): + return False + + return handler + + return wrapper diff --git a/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py new file mode 100644 index 000000000..893a62db9 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.module_utils._text import to_native + + +class AnsibleAWSError(Exception): + def __str__(self): + if self.exception and self.message: + return f"{self.message}: {to_native(self.exception)}" + + return super().__str__() + + def __init__(self, message=None, exception=None, **kwargs): + if not message and not exception: + super().__init__() + elif not message: + super().__init__(exception) + else: + super().__init__(message) + + self.exception = exception + self.message = message + + # In places where passing more information to module.fail_json would be helpful + # store the extra info. Other plugin types have to raise the correct exception + # such as AnsibleLookupError, so can't easily consume this. 
+ self.kwargs = kwargs or {}
+
+
+class AnsibleBotocoreError(AnsibleAWSError):
+ pass
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
index 6ebed23ba..430823f3b 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/iam.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -1,24 +1,280 @@
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import re
+from copy import deepcopy
try:
import botocore
except ImportError:
- pass
+ pass # Modules are responsible for handling this.
from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .arn import parse_aws_arn
+from .arn import validate_aws_arn
+from .botocore import is_boto3_error_code
+from .botocore import normalize_boto3_result
+from .errors import AWSErrorHandler
+from .exceptions import AnsibleAWSError
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
+
+
+class AnsibleIAMError(AnsibleAWSError):
+ pass
+
+
+class IAMErrorHandler(AWSErrorHandler):
+ _CUSTOM_EXCEPTION = AnsibleIAMError
+
+ @classmethod
+ def _is_missing(cls):
+ return is_boto3_error_code("NoSuchEntity")
+
+
+@IAMErrorHandler.deletion_error_handler("detach group policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_group_policy(client, arn, group):
+ client.detach_group_policy(PolicyArn=arn, GroupName=group)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach role policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_role_policy(client, arn, role):
+ # detach_role_policy is the call that accepts RoleName; detach_group_policy does not.
+ client.detach_role_policy(PolicyArn=arn, RoleName=role)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach user policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_user_policy(client, arn, user):
+ # detach_user_policy is the call that accepts UserName; detach_group_policy does not.
+ client.detach_user_policy(PolicyArn=arn, UserName=user)
+ return True
+
+
+@AWSRetry.jittered_backoff()
+def _get_iam_instance_profiles(client, **kwargs):
+ return client.get_instance_profile(**kwargs)["InstanceProfile"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles(client, **kwargs):
+ paginator = client.get_paginator("list_instance_profiles")
+ return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles_for_role(client, **kwargs):
+ paginator = client.get_paginator("list_instance_profiles_for_role")
+ return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@IAMErrorHandler.list_error_handler("list policies for role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_policies(client, role_name):
+ paginator = client.get_paginator("list_role_policies")
+ return paginator.paginate(RoleName=role_name).build_full_result()["PolicyNames"]
+
+
+@IAMErrorHandler.list_error_handler("list policies attached to role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_attached_policies(client, role_name):
+ paginator = client.get_paginator("list_attached_role_policies")
+ return paginator.paginate(RoleName=role_name).build_full_result()["AttachedPolicies"]
+
+
+@IAMErrorHandler.list_error_handler("list users", [])
+@AWSRetry.jittered_backoff()
+def list_iam_users(client, path=None):
+ args = {}
+ if path is not None:
+ args = {"PathPrefix": path}
+ paginator = client.get_paginator("list_users")
+ return paginator.paginate(**args).build_full_result()["Users"]
+
+
+@IAMErrorHandler.common_error_handler("list all managed policies")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policies(client, **kwargs):
+ paginator = client.get_paginator("list_policies")
+ return paginator.paginate(**kwargs).build_full_result()["Policies"]
+
+
+list_managed_policies = list_iam_managed_policies
+
+
+@IAMErrorHandler.list_error_handler("list entities for policy", [])
+@AWSRetry.jittered_backoff()
+def list_iam_entities_for_policy(client, arn):
+ paginator = client.get_paginator("list_entities_for_policy")
+ return paginator.paginate(PolicyArn=arn).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("list roles", [])
+@AWSRetry.jittered_backoff()
+def list_iam_roles(client, path=None):
+ args = {}
+ if path:
+ args["PathPrefix"] = path
+ paginator = client.get_paginator("list_roles")
+ return paginator.paginate(**args).build_full_result()["Roles"]
+
+
+@IAMErrorHandler.list_error_handler("list mfa devices", [])
+@AWSRetry.jittered_backoff()
+def list_iam_mfa_devices(client, user=None):
+ args = {}
+ if user:
+ args["UserName"] = user
+ paginator = client.get_paginator("list_mfa_devices")
+ return paginator.paginate(**args).build_full_result()["MFADevices"]
+
+
+@IAMErrorHandler.list_error_handler("get role")
+@AWSRetry.jittered_backoff()
+def get_iam_role(client, name):
+ return client.get_role(RoleName=name)["Role"]
+
+
+@IAMErrorHandler.list_error_handler("get group")
+@AWSRetry.jittered_backoff()
+def get_iam_group(client, name):
+ paginator = client.get_paginator("get_group")
+ return paginator.paginate(GroupName=name).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("get access keys for user", [])
+@AWSRetry.jittered_backoff()
+def get_iam_access_keys(client, user):
+ results = client.list_access_keys(UserName=user)
+ return normalize_iam_access_keys(results.get("AccessKeyMetadata", []))
+
+
+@IAMErrorHandler.list_error_handler("get user")
+@AWSRetry.jittered_backoff()
+def get_iam_user(client, user):
+ results = client.get_user(UserName=user)
+ return normalize_iam_user(results.get("User", []))
+
+
+def find_iam_managed_policy_by_name(client, name):
+ policies = list_iam_managed_policies(client)
+ for policy in policies:
+ if policy["PolicyName"] == name:
+ return policy
+ return None
+
+
+def get_iam_managed_policy_by_name(client, name):
+ # get_policy() requires an ARN, and list_policies() doesn't return all fields, so we need to do both :(
+ policy = find_iam_managed_policy_by_name(client, name)
+ if policy is None:
+ return None
+ return get_iam_managed_policy_by_arn(client, policy["Arn"])
+
+
+@IAMErrorHandler.common_error_handler("get policy")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_by_arn(client, arn):
+ policy = client.get_policy(PolicyArn=arn)["Policy"]
+ return policy
+
+
+@IAMErrorHandler.common_error_handler("list policy versions")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policy_versions(client, arn):
+ return client.list_policy_versions(PolicyArn=arn)["Versions"]
+
+
+@IAMErrorHandler.common_error_handler("get policy version")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_version(client, arn, version):
+ return client.get_policy_version(PolicyArn=arn, VersionId=version)["PolicyVersion"]
+
+
+def normalize_iam_mfa_device(device):
+ """Converts IAM MFA Device from the CamelCase boto3 format to the snake_case Ansible format"""
+ if 
not device: + return device + camel_device = camel_dict_to_snake_dict(device) + camel_device["tags"] = boto3_tag_list_to_ansible_dict(device.pop("Tags", [])) + return camel_device -from .ec2 import AWSRetry -from .core import is_boto3_error_code -from .core import parse_aws_arn + +def normalize_iam_mfa_devices(devices): + """Converts a list of IAM MFA Devices from the CamelCase boto3 format to the snake_case Ansible format""" + if not devices: + return [] + devices = [normalize_iam_mfa_device(d) for d in devices] + return devices + + +def normalize_iam_user(user): + """Converts IAM users from the CamelCase boto3 format to the snake_case Ansible format""" + if not user: + return user + camel_user = camel_dict_to_snake_dict(user) + camel_user["tags"] = boto3_tag_list_to_ansible_dict(user.pop("Tags", [])) + return camel_user + + +def normalize_iam_policy(policy): + """Converts IAM policies from the CamelCase boto3 format to the snake_case Ansible format""" + if not policy: + return policy + camel_policy = camel_dict_to_snake_dict(policy) + camel_policy["tags"] = boto3_tag_list_to_ansible_dict(policy.get("Tags", [])) + return camel_policy + + +def normalize_iam_group(group): + """Converts IAM Groups from the CamelCase boto3 format to the snake_case Ansible format""" + if not group: + return group + camel_group = camel_dict_to_snake_dict(normalize_boto3_result(group)) + return camel_group + + +def normalize_iam_access_key(access_key): + """Converts IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + if not access_key: + return access_key + camel_key = camel_dict_to_snake_dict(normalize_boto3_result(access_key)) + return camel_key + + +def normalize_iam_access_keys(access_keys): + """Converts a list of IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + if not access_keys: + return [] + access_keys = [normalize_iam_access_key(k) for k in access_keys] + sorted_keys = sorted(access_keys, key=lambda d: d.get("create_date", None)) + return sorted_keys + + +def convert_managed_policy_names_to_arns(client, policy_names): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): + return policy_names + allpolicies = {} + policies = list_iam_managed_policies(client) + + for policy in policies: + allpolicies[policy["PolicyName"]] = policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] + try: + return [allpolicies[policy] for policy in policy_names if policy is not None] + except KeyError as e: + raise AnsibleIAMError(message="Failed to find policy by name:" + str(e), exception=e) from e def get_aws_account_id(module): - """ Given an AnsibleAWSModule instance, get the active AWS account ID - """ + """Given an AnsibleAWSModule instance, get the active AWS account ID""" return get_aws_account_info(module)[0] @@ -40,36 +296,204 @@ def get_aws_account_info(module): account_id = None partition = None try: - sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + sts_client = module.client("sts", retry_decorator=AWSRetry.jittered_backoff()) caller_id = sts_client.get_caller_identity(aws_retry=True) - account_id = caller_id.get('Account') - partition = caller_id.get('Arn').split(':')[1] + account_id = caller_id.get("Account") + partition = caller_id.get("Arn").split(":")[1] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError): try: - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - _arn, partition, _service, _reg, 
account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') - except is_boto3_error_code('AccessDenied') as e: + iam_client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)["User"][ + "Arn" + ].split(":") + except is_boto3_error_code("AccessDenied") as e: try: except_msg = to_native(e.message) except AttributeError: except_msg = to_native(e) result = parse_aws_arn(except_msg) - if result is None or result['service'] != 'iam': + if result is None or result["service"] != "iam": module.fail_json_aws( e, - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) - account_id = result.get('account_id') - partition = result.get('partition') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + account_id = result.get("account_id") + partition = result.get("partition") + except ( # pylint: disable=duplicate-except + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: module.fail_json_aws( e, - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) if account_id is None or partition is None: module.fail_json( - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) return (to_native(account_id), to_native(partition)) + + +@IAMErrorHandler.common_error_handler("create instance profile") +@AWSRetry.jittered_backoff() +def create_iam_instance_profile(client, name, path, tags): + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + path = path or "/" + result = client.create_instance_profile(InstanceProfileName=name, Path=path, Tags=boto3_tags) + return result["InstanceProfile"] + + +@IAMErrorHandler.deletion_error_handler("delete instance profile") +@AWSRetry.jittered_backoff() +def delete_iam_instance_profile(client, name): + client.delete_instance_profile(InstanceProfileName=name) + # Error Handler will return False if the resource didn't exist + return True + + +@IAMErrorHandler.common_error_handler("add role to instance profile") +@AWSRetry.jittered_backoff() +def add_role_to_iam_instance_profile(client, profile_name, role_name): + client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) + return True + + +@IAMErrorHandler.deletion_error_handler("remove role from instance profile") +@AWSRetry.jittered_backoff() +def remove_role_from_iam_instance_profile(client, profile_name, role_name): + client.remove_role_from_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) + # Error Handler will return False if the resource didn't exist + return True + + +@IAMErrorHandler.list_error_handler("list instance profiles", []) +def list_iam_instance_profiles(client, name=None, prefix=None, role=None): + """ + Returns a list of IAM instance profiles in boto3 format. + Profiles need to be converted to Ansible format using normalize_iam_instance_profile before being displayed. 
+ + See also: normalize_iam_instance_profile + """ + if role: + return _list_iam_instance_profiles_for_role(client, RoleName=role) + if name: + # Unlike the others this returns a single result, make this a list with 1 element. + return [_get_iam_instance_profiles(client, InstanceProfileName=name)] + if prefix: + return _list_iam_instance_profiles(client, PathPrefix=prefix) + return _list_iam_instance_profiles(client) + + +def normalize_iam_instance_profile(profile, _v7_compat=False): + """ + Converts a boto3 format IAM instance profile into "Ansible" format + + _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE. + """ + + new_profile = camel_dict_to_snake_dict(deepcopy(profile)) + if profile.get("Roles"): + new_profile["roles"] = [normalize_iam_role(role, _v7_compat=_v7_compat) for role in profile.get("Roles")] + if profile.get("Tags"): + new_profile["tags"] = boto3_tag_list_to_ansible_dict(profile.get("Tags")) + else: + new_profile["tags"] = {} + new_profile["original"] = profile + return new_profile + + +def normalize_iam_role(role, _v7_compat=False): + """ + Converts a boto3 format IAM instance role into "Ansible" format + + _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE. + """ + + new_role = camel_dict_to_snake_dict(deepcopy(role)) + if role.get("InstanceProfiles"): + new_role["instance_profiles"] = [ + normalize_iam_instance_profile(profile, _v7_compat=_v7_compat) for profile in role.get("InstanceProfiles") + ] + if role.get("AssumeRolePolicyDocument"): + if _v7_compat: + # new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") + else: + new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + + new_role["tags"] = boto3_tag_list_to_ansible_dict(role.get("Tags", [])) + return new_role + + +@IAMErrorHandler.common_error_handler("tag instance profile") +@AWSRetry.jittered_backoff() +def tag_iam_instance_profile(client, name, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + result = client.tag_instance_profile(InstanceProfileName=name, Tags=boto3_tags) + + +@IAMErrorHandler.common_error_handler("untag instance profile") +@AWSRetry.jittered_backoff() +def untag_iam_instance_profile(client, name, tags): + if not tags: + return + client.untag_instance_profile(InstanceProfileName=name, TagKeys=tags) + + +@IAMErrorHandler.common_error_handler("tag managed policy") +@AWSRetry.jittered_backoff() +def tag_iam_policy(client, arn, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + client.tag_policy(PolicyArn=arn, Tags=boto3_tags) + + +@IAMErrorHandler.common_error_handler("untag managed policy") +@AWSRetry.jittered_backoff() +def untag_iam_policy(client, arn, tags): + if not tags: + return + client.untag_policy(PolicyArn=arn, TagKeys=tags) + + +def _validate_iam_name(resource_type, name=None): + if name is None: + return None + LENGTHS = {"role": 64, "user": 64} + regex = r"[\w+=,.@-]+" + max_length = LENGTHS.get(resource_type, 128) + if len(name) > max_length: + return f"Length of {resource_type} name may not exceed {max_length}" + if not re.fullmatch(regex, name): + return f"{resource_type} name must match pattern {regex}" + return None + + +def _validate_iam_path(resource_type, path=None): + if path is None: + return None + regex = r"\/([\w+=,.@-]+\/)*" + max_length = 512 + if len(path) > max_length: + return 
f"Length of {resource_type} path may not exceed {max_length}" + if not path.endswith("/") or not path.startswith("/"): + return f"{resource_type} path must begin and end with /" + if not re.fullmatch(regex, path): + return f"{resource_type} path must match pattern {regex}" + return None + + +def validate_iam_identifiers(resource_type, name=None, path=None): + name_problem = _validate_iam_name(resource_type, name) + if name_problem: + return name_problem + path_problem = _validate_iam_path(resource_type, path) + if path_problem: + return path_problem + + return None diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py index 7d4ba717f..8a2ff3c0b 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/modules.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py @@ -1,20 +1,7 @@ -# +# -*- coding: utf-8 -*- + # Copyright 2017 Michael De La Rue | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """This module adds shared support for generic Amazon AWS modules @@ -50,41 +37,38 @@ The call will be retried the specified number of times, so the calling functions don't need to be wrapped in the backoff decorator. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from functools import wraps import logging import os import re import traceback - try: from cStringIO import StringIO except ImportError: # Python 3 from io import StringIO +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils._text import to_native -from .botocore import HAS_BOTO3 +from .botocore import boto3_at_least from .botocore import boto3_conn +from .botocore import botocore_at_least +from .botocore import check_sdk_version_supported +from .botocore import gather_sdk_versions from .botocore import get_aws_connection_info from .botocore import get_aws_region -from .botocore import gather_sdk_versions - -from .version import LooseVersion +from .exceptions import AnsibleBotocoreError +from .retries import RetryingBotoClientWrapper # Currently only AnsibleAWSModule. However we have a lot of Copy and Paste code # for Inventory and Lookup modules which we should refactor -class AnsibleAWSModule(object): +class AnsibleAWSModule: """An ansible module class for AWS modules AnsibleAWSModule provides an a class for building modules which @@ -95,12 +79,8 @@ class AnsibleAWSModule(object): (available on #ansible-aws on IRC) to request the additional features needed. 
""" - default_settings = { - "default_args": True, - "check_boto3": True, - "auto_retry": True, - "module_class": AnsibleModule - } + + default_settings = {"default_args": True, "check_boto3": True, "auto_retry": True, "module_class": AnsibleModule} def __init__(self, **kwargs): local_settings = {} @@ -122,40 +102,40 @@ class AnsibleAWSModule(object): self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs) if local_settings["check_boto3"]: - if not HAS_BOTO3: - self._module.fail_json( - msg=missing_required_lib('botocore and boto3')) - if not self.botocore_at_least('1.21.0'): - self.warn('botocore < 1.21.0 is not supported or tested.' - ' Some features may not work.') - if not self.boto3_at_least("1.18.0"): - self.warn('boto3 < 1.18.0 is not supported or tested.' - ' Some features may not work.') - - deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY', - 'EC2_URL', 'S3_URL'} + try: + check_sdk_version_supported(warn=self.warn) + except AnsibleBotocoreError as e: + self._module.fail_json(to_native(e)) + + deprecated_vars = {"EC2_REGION", "EC2_SECURITY_TOKEN", "EC2_SECRET_KEY", "EC2_ACCESS_KEY", "EC2_URL", "S3_URL"} if deprecated_vars.intersection(set(os.environ.keys())): self._module.deprecate( - "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " - "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " - "variables has been deprecated. " - "These variables are currently used for all AWS services which can " - "cause confusion. We recomend using the relevant module " - "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " - "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " - "environment variables can be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " + "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " + "variables has been deprecated. " + "These variables are currently used for all AWS services which can " + "cause confusion. We recomend using the relevant module " + "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " + "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " + "environment variables can be used instead." + ), + date="2024-12-01", + collection_name="amazon.aws", ) - if 'AWS_SECURITY_TOKEN' in os.environ.keys(): + if "AWS_SECURITY_TOKEN" in os.environ.keys(): self._module.deprecate( - "Support for the 'AWS_SECURITY_TOKEN' environment variable " - "has been deprecated. This variable was based on the original " - "boto SDK, support for which has now been dropped. " - "We recommend using the 'session_token' module parameter " - "or alternatively the 'AWS_SESSION_TOKEN' environment variable " - "can be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for the 'AWS_SECURITY_TOKEN' environment variable " + "has been deprecated. This variable was based on the original " + "boto SDK, support for which has now been dropped. " + "We recommend using the 'session_token' module parameter " + "or alternatively the 'AWS_SESSION_TOKEN' environment variable " + "can be used instead." 
+ ), + date="2024-12-01", + collection_name="amazon.aws", ) self.check_mode = self._module.check_mode @@ -164,8 +144,8 @@ class AnsibleAWSModule(object): self._botocore_endpoint_log_stream = StringIO() self.logger = None - if self.params.get('debug_botocore_endpoint_logs'): - self.logger = logging.getLogger('botocore.endpoint') + if self.params.get("debug_botocore_endpoint_logs"): + self.logger = logging.getLogger("botocore.endpoint") self.logger.setLevel(logging.DEBUG) self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream)) @@ -175,7 +155,7 @@ class AnsibleAWSModule(object): def _get_resource_action_list(self): actions = [] - for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'): + for ln in self._botocore_endpoint_log_stream.getvalue().split("\n"): ln = ln.strip() if not ln: continue @@ -183,17 +163,17 @@ class AnsibleAWSModule(object): if found_operational_request: operation_request = found_operational_request.group(0)[20:-1] resource = re.search(r"https://.*?\.", ln).group(0)[8:-1] - actions.append("{0}:{1}".format(resource, operation_request)) + actions.append(f"{resource}:{operation_request}") return list(set(actions)) def exit_json(self, *args, **kwargs): - if self.params.get('debug_botocore_endpoint_logs'): - kwargs['resource_actions'] = self._get_resource_action_list() + if self.params.get("debug_botocore_endpoint_logs"): + kwargs["resource_actions"] = self._get_resource_action_list() return self._module.exit_json(*args, **kwargs) def fail_json(self, *args, **kwargs): - if self.params.get('debug_botocore_endpoint_logs'): - kwargs['resource_actions'] = self._get_resource_action_list() + if self.params.get("debug_botocore_endpoint_logs"): + kwargs["resource_actions"] = self._get_resource_action_list() return self._module.fail_json(*args, **kwargs) def debug(self, *args, **kwargs): @@ -211,16 +191,18 @@ class AnsibleAWSModule(object): def md5(self, *args, **kwargs): return self._module.md5(*args, **kwargs) - def client(self, service, retry_decorator=None): + def client(self, service, retry_decorator=None, **extra_params): region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) - conn = boto3_conn(self, conn_type='client', resource=service, - region=region, endpoint=endpoint_url, **aws_connect_kwargs) - return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + conn = boto3_conn(self, conn_type="client", resource=service, **kw_args) + return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator) - def resource(self, service): + def resource(self, service, **extra_params): region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) - return boto3_conn(self, conn_type='resource', resource=service, - region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + return boto3_conn(self, conn_type="resource", resource=service, **kw_args) @property def region(self): @@ -242,7 +224,7 @@ class AnsibleAWSModule(object): except_msg = to_native(exception) if msg is not None: - message = '{0}: {1}'.format(msg, except_msg) + message = f"{msg}: {except_msg}" else: message = except_msg @@ -251,11 +233,7 @@ class AnsibleAWSModule(object): except AttributeError: response = None - failure = dict( - msg=message, - 
exception=last_traceback, - **self._gather_versions() - ) + failure = dict(msg=message, exception=last_traceback, **self._gather_versions()) failure.update(kwargs) @@ -264,6 +242,12 @@ class AnsibleAWSModule(object): self.fail_json(**failure) + def fail_json_aws_error(self, exception): + """A helper to call the right failure mode after catching an AnsibleAWSError""" + if exception.exception: + self.fail_json_aws(exception.exception, msg=exception.message) + self.fail_json(msg=exception.message) + def _gather_versions(self): """Gather AWS SDK (boto3 and botocore) dependency versions @@ -287,20 +271,12 @@ class AnsibleAWSModule(object): """ if not self.boto3_at_least(desired): self._module.fail_json( - msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs), - **self._gather_versions() + msg=missing_required_lib(f"boto3>={desired}", **kwargs), + **self._gather_versions(), ) def boto3_at_least(self, desired): - """Check if the available boto3 version is greater than or equal to a desired version. - - Usage: - if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'): - # conditionally fail on old boto3 versions if a specific feature is not supported - module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.") - """ - existing = self._gather_versions() - return LooseVersion(existing['boto3_version']) >= LooseVersion(desired) + return boto3_at_least(desired) def require_botocore_at_least(self, desired, **kwargs): """Check if the available botocore version is greater than or equal to a desired version. @@ -317,55 +293,12 @@ class AnsibleAWSModule(object): """ if not self.botocore_at_least(desired): self._module.fail_json( - msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs), - **self._gather_versions() + msg=missing_required_lib(f"botocore>={desired}", **kwargs), + **self._gather_versions(), ) def botocore_at_least(self, desired): - """Check if the available botocore version is greater than or equal to a desired version. - - Usage: - if not module.botocore_at_least('1.2.3'): - module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3') - if not module.botocore_at_least('1.5.3'): - module.warn('Botocore did not include waiters for Service X before 1.5.3. 
' - 'To wait until Service X resources are fully available, update botocore.') - """ - existing = self._gather_versions() - return LooseVersion(existing['botocore_version']) >= LooseVersion(desired) - - -class _RetryingBotoClientWrapper(object): - __never_wait = ( - 'get_paginator', 'can_paginate', - 'get_waiter', 'generate_presigned_url', - ) - - def __init__(self, client, retry): - self.client = client - self.retry = retry - - def _create_optional_retry_wrapper_function(self, unwrapped): - retrying_wrapper = self.retry(unwrapped) - - @wraps(unwrapped) - def deciding_wrapper(aws_retry=False, *args, **kwargs): - if aws_retry: - return retrying_wrapper(*args, **kwargs) - else: - return unwrapped(*args, **kwargs) - return deciding_wrapper - - def __getattr__(self, name): - unwrapped = getattr(self.client, name) - if name in self.__never_wait: - return unwrapped - elif callable(unwrapped): - wrapped = self._create_optional_retry_wrapper_function(unwrapped) - setattr(self, name, wrapped) - return wrapped - else: - return unwrapped + return botocore_at_least(desired) def _aws_common_argument_spec(): @@ -376,55 +309,58 @@ def _aws_common_argument_spec(): """ return dict( access_key=dict( - aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'], + aliases=["aws_access_key_id", "aws_access_key", "ec2_access_key"], deprecated_aliases=[ - dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_access_key", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", "EC2_ACCESS_KEY"]), no_log=False, ), secret_key=dict( - aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'], + aliases=["aws_secret_access_key", "aws_secret_key", "ec2_secret_key"], deprecated_aliases=[ - dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_secret_key", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_SECRET_ACCESS_KEY", "AWS_SECRET_KEY", "EC2_SECRET_KEY"]), no_log=True, ), session_token=dict( - aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'], + aliases=["aws_session_token", "security_token", "access_token", "aws_security_token"], deprecated_aliases=[ - dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'), - dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'), - dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'), + dict(name="access_token", date="2024-12-01", collection_name="amazon.aws"), + dict(name="security_token", date="2024-12-01", collection_name="amazon.aws"), + dict(name="aws_security_token", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_SESSION_TOKEN", "AWS_SECURITY_TOKEN", "EC2_SECURITY_TOKEN"]), no_log=True, ), profile=dict( - aliases=['aws_profile'], + aliases=["aws_profile"], + fallback=(env_fallback, ["AWS_PROFILE", "AWS_DEFAULT_PROFILE"]), ), - endpoint_url=dict( - aliases=['aws_endpoint_url', 'ec2_url', 's3_url'], + aliases=["aws_endpoint_url", "ec2_url", "s3_url"], deprecated_aliases=[ - dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'), - dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_url", date="2024-12-01", collection_name="amazon.aws"), + dict(name="s3_url", date="2024-12-01", collection_name="amazon.aws"), ], - fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']), + 
fallback=(env_fallback, ["AWS_URL", "EC2_URL", "S3_URL"]),
),
validate_certs=dict(
- type='bool',
+ type="bool",
default=True,
),
aws_ca_bundle=dict(
- type='path',
- fallback=(env_fallback, ['AWS_CA_BUNDLE']),
+ type="path",
+ fallback=(env_fallback, ["AWS_CA_BUNDLE"]),
),
aws_config=dict(
- type='dict',
+ type="dict",
),
debug_botocore_endpoint_logs=dict(
- type='bool',
+ type="bool",
default=False,
- fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']),
+ fallback=(env_fallback, ["ANSIBLE_DEBUG_BOTOCORE_LOGS"]),
),
)
@@ -435,11 +371,11 @@ def aws_argument_spec():
"""
region_spec = dict(
region=dict(
- aliases=['aws_region', 'ec2_region'],
+ aliases=["aws_region", "ec2_region"],
deprecated_aliases=[
- dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'),
+ dict(name="ec2_region", date="2024-12-01", collection_name="amazon.aws"),
],
- fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']),
+ fallback=(env_fallback, ["AWS_REGION", "AWS_DEFAULT_REGION", "EC2_REGION"]),
),
)
spec = _aws_common_argument_spec()
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
index 4aeabd5f2..60b096f84 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/policy.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -26,33 +28,57 @@
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
from functools import cmp_to_key
+import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils._text import to_text
from ansible.module_utils.six import binary_type
from ansible.module_utils.six import string_types
+def _canonify_root_arn(arn):
+ # There are multiple ways to specify delegation of access to an account
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts
+ if arn.startswith("arn:aws:iam::") and arn.endswith(":root"):
+ arn = arn.split(":")[4]
+ return arn
+
+
+def _canonify_policy_dict_item(item, key):
+ """
+ Converts special cases where there are multiple ways to write the same thing into a single form
+ """
+ # There are multiple ways to specify anonymous principals
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-anonymous
+ if key in ["NotPrincipal", "Principal"]:
+ if item == "*":
+ return {"AWS": "*"}
+ return item
+
+
+def _tuplify_list(element):
+ if isinstance(element, list):
+ return tuple(element)
+ return element
+
+
def _hashable_policy(policy, policy_list):
"""
- Takes a policy and returns a list, the contents of which are all hashable and sorted.
- Example input policy: - {'Version': '2012-10-17', - 'Statement': [{'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} - }]} - Returned value: - [('Statement', ((('Action', ('s3:PutObjectAcl',)), - ('Effect', ('Allow',)), - ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), - ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), - ('Version', ('2012-10-17',)))] + Takes a policy and returns a list, the contents of which are all hashable and sorted. + Example input policy: + {'Version': '2012-10-17', + 'Statement': [{'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + }]} + Returned value: + [('Statement', ((('Action', ('s3:PutObjectAcl',)), + ('Effect', ('Allow',)), + ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), + ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), + ('Version', ('2012-10-17',)))] """ # Amazon will automatically convert bool and int to strings for us @@ -63,30 +89,24 @@ def _hashable_policy(policy, policy_list): if isinstance(policy, list): for each in policy: - tupleified = _hashable_policy(each, []) - if isinstance(tupleified, list): - tupleified = tuple(tupleified) + hashed_policy = _hashable_policy(each, []) + tupleified = _tuplify_list(hashed_policy) policy_list.append(tupleified) elif isinstance(policy, string_types) or isinstance(policy, binary_type): policy = to_text(policy) # convert root account ARNs to just account IDs - if policy.startswith('arn:aws:iam::') and policy.endswith(':root'): - policy = policy.split(':')[4] + policy = _canonify_root_arn(policy) return [policy] elif isinstance(policy, dict): + # Sort the keys to ensure a consistent order for later comparison sorted_keys = list(policy.keys()) sorted_keys.sort() for key in sorted_keys: - element = policy[key] - # Special case defined in - # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html - if key in ["NotPrincipal", "Principal"] and policy[key] == "*": - element = {"AWS": "*"} - tupleified = _hashable_policy(element, []) - if isinstance(tupleified, list): - tupleified = tuple(tupleified) + # Converts special cases to a consistent form + element = _canonify_policy_dict_item(policy[key], key) + hashed_policy = _hashable_policy(element, []) + tupleified = _tuplify_list(hashed_policy) policy_list.append((key, tupleified)) - # ensure we aren't returning deeply nested structures of length 1 if len(policy_list) == 1 and isinstance(policy_list[0], tuple): policy_list = policy_list[0] @@ -96,7 +116,7 @@ def _hashable_policy(policy, policy_list): def _py3cmp(a, b): - """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3.""" + """Python 2 can sort lists of mixed types. Strings < tuples. 
Without this function this fails on Python 3.""" try: if a > b: return 1 @@ -107,8 +127,8 @@ def _py3cmp(a, b): except TypeError as e: # check to see if they're tuple-string # always say strings are less than tuples (to maintain compatibility with python2) - str_ind = to_text(e).find('str') - tup_ind = to_text(e).find('tuple') + str_ind = to_text(e).find("str") + tup_ind = to_text(e).find("tuple") if -1 not in (str_ind, tup_ind): if str_ind < tup_ind: return -1 @@ -118,8 +138,8 @@ def _py3cmp(a, b): def compare_policies(current_policy, new_policy, default_version="2008-10-17"): - """ Compares the existing policy and the updated policy - Returns True if there is a difference between policies. + """Compares the existing policy and the updated policy + Returns True if there is a difference between policies. """ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html if default_version: @@ -134,8 +154,10 @@ def compare_policies(current_policy, new_policy, default_version="2008-10-17"): def sort_json_policy_dict(policy_dict): + """ + DEPRECATED - will be removed in amazon.aws 8.0.0 - """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but + Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but different orders will return true Args: policy_dict (dict): Dict representing IAM JSON policy. @@ -151,8 +173,16 @@ def sort_json_policy_dict(policy_dict): } """ - def value_is_list(my_list): + ansible_warnings.deprecate( + ( + "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using " + "amazon.aws.module_utils.policy.compare_policies instead" + ), + version="8.0.0", + collection_name="amazon.aws", + ) + def value_is_list(my_list): checked_list = [] for item in my_list: if isinstance(item, dict): diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py index 8b5bcb67c..85cde2e4e 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/rds.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py @@ -1,54 +1,85 @@ +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from collections import namedtuple from time import sleep try: - from botocore.exceptions import BotoCoreError, ClientError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict -from .ec2 import compare_aws_tags +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict +from .tagging import compare_aws_tags from .waiters import get_waiter -Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes']) +Boto3ClientMethod = namedtuple( + "Boto3ClientMethod", ["name", "waiter", "operation_description", "resource", "retry_codes"] +) # Whitelist boto3 client methods for cluster and instance 
resources cluster_method_names = [ - 'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3', - 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource', - 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster' + "create_db_cluster", + "restore_db_cluster_from_snapshot", + "restore_db_cluster_from_s3", + "restore_db_cluster_to_point_in_time", + "modify_db_cluster", + "delete_db_cluster", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "promote_read_replica_db_cluster", + "stop_db_cluster", + "start_db_cluster", ] instance_method_names = [ - 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3', - 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance', - 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource', - 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance', - 'remove_role_from_db_instance' + "create_db_instance", + "restore_db_instance_to_point_in_time", + "restore_db_instance_from_s3", + "restore_db_instance_from_db_snapshot", + "create_db_instance_read_replica", + "modify_db_instance", + "delete_db_instance", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "promote_read_replica", + "stop_db_instance", + "start_db_instance", + "reboot_db_instance", + "add_role_to_db_instance", + "remove_role_from_db_instance", ] cluster_snapshot_method_names = [ - 'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', - 'list_tags_for_resource', 'copy_db_cluster_snapshot' + "create_db_cluster_snapshot", + "delete_db_cluster_snapshot", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "copy_db_cluster_snapshot", ] instance_snapshot_method_names = [ - 'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', - 'copy_db_snapshot', 'list_tags_for_resource' + "create_db_snapshot", + "delete_db_snapshot", + "add_tags_to_resource", + "remove_tags_from_resource", + "copy_db_snapshot", + "list_tags_for_resource", ] def get_rds_method_attribute(method_name, module): - ''' + """ Returns rds attributes of the specified method. 
Parameters: @@ -66,134 +97,152 @@ def get_rds_method_attribute(method_name, module): Raises: NotImplementedError if wait is True but no waiter can be found for specified method - ''' - waiter = '' - readable_op = method_name.replace('_', ' ').replace('db', 'DB') - resource = '' + """ + waiter = "" + readable_op = method_name.replace("_", " ").replace("db", "DB") + resource = "" retry_codes = [] - if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params: - resource = 'cluster' - if method_name == 'delete_db_cluster': - waiter = 'cluster_deleted' + if method_name in cluster_method_names and "new_db_cluster_identifier" in module.params: + resource = "cluster" + if method_name == "delete_db_cluster": + waiter = "cluster_deleted" else: - waiter = 'cluster_available' + waiter = "cluster_available" # Handle retry codes - if method_name == 'restore_db_cluster_from_snapshot': - retry_codes = ['InvalidDBClusterSnapshotState'] + if method_name == "restore_db_cluster_from_snapshot": + retry_codes = ["InvalidDBClusterSnapshotState"] else: - retry_codes = ['InvalidDBClusterState'] - elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params: - resource = 'instance' - if method_name == 'delete_db_instance': - waiter = 'db_instance_deleted' - elif method_name == 'stop_db_instance': - waiter = 'db_instance_stopped' - elif method_name == 'add_role_to_db_instance': - waiter = 'role_associated' - elif method_name == 'remove_role_from_db_instance': - waiter = 'role_disassociated' - elif method_name == 'promote_read_replica': - waiter = 'read_replica_promoted' + retry_codes = ["InvalidDBClusterState"] + elif method_name in instance_method_names and "new_db_instance_identifier" in module.params: + resource = "instance" + if method_name == "delete_db_instance": + waiter = "db_instance_deleted" + elif method_name == "stop_db_instance": + waiter = "db_instance_stopped" + elif method_name == "add_role_to_db_instance": + waiter = "role_associated" + elif method_name == "remove_role_from_db_instance": + waiter = "role_disassociated" + elif method_name == "promote_read_replica": + waiter = "read_replica_promoted" + elif method_name == "db_cluster_promoting": + waiter = "db_cluster_promoting" else: - waiter = 'db_instance_available' + waiter = "db_instance_available" # Handle retry codes - if method_name == 'restore_db_instance_from_db_snapshot': - retry_codes = ['InvalidDBSnapshotState'] + if method_name == "restore_db_instance_from_db_snapshot": + retry_codes = ["InvalidDBSnapshotState"] else: - retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] - elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params: - resource = 'cluster_snapshot' - if method_name == 'delete_db_cluster_snapshot': - waiter = 'db_cluster_snapshot_deleted' - retry_codes = ['InvalidDBClusterSnapshotState'] - elif method_name == 'create_db_cluster_snapshot': - waiter = 'db_cluster_snapshot_available' - retry_codes = ['InvalidDBClusterState'] + retry_codes = ["InvalidDBInstanceState", "InvalidDBSecurityGroupState"] + elif method_name in cluster_snapshot_method_names and "db_cluster_snapshot_identifier" in module.params: + resource = "cluster_snapshot" + if method_name == "delete_db_cluster_snapshot": + waiter = "db_cluster_snapshot_deleted" + retry_codes = ["InvalidDBClusterSnapshotState"] + elif method_name == "create_db_cluster_snapshot": + waiter = "db_cluster_snapshot_available" + retry_codes = 
["InvalidDBClusterState"] else: # Tagging - waiter = 'db_cluster_snapshot_available' - retry_codes = ['InvalidDBClusterSnapshotState'] - elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params: - resource = 'instance_snapshot' - if method_name == 'delete_db_snapshot': - waiter = 'db_snapshot_deleted' - retry_codes = ['InvalidDBSnapshotState'] - elif method_name == 'create_db_snapshot': - waiter = 'db_snapshot_available' - retry_codes = ['InvalidDBInstanceState'] + waiter = "db_cluster_snapshot_available" + retry_codes = ["InvalidDBClusterSnapshotState"] + elif method_name in instance_snapshot_method_names and "db_snapshot_identifier" in module.params: + resource = "instance_snapshot" + if method_name == "delete_db_snapshot": + waiter = "db_snapshot_deleted" + retry_codes = ["InvalidDBSnapshotState"] + elif method_name == "create_db_snapshot": + waiter = "db_snapshot_available" + retry_codes = ["InvalidDBInstanceState"] else: # Tagging - waiter = 'db_snapshot_available' - retry_codes = ['InvalidDBSnapshotState'] + waiter = "db_snapshot_available" + retry_codes = ["InvalidDBSnapshotState"] else: - if module.params.get('wait'): - raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name)) + if module.params.get("wait"): + raise NotImplementedError( + f"method {method_name} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ) - return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, - resource=resource, retry_codes=retry_codes) + return Boto3ClientMethod( + name=method_name, waiter=waiter, operation_description=readable_op, resource=resource, retry_codes=retry_codes + ) def get_final_identifier(method_name, module): updated_identifier = None - apply_immediately = module.params.get('apply_immediately') + apply_immediately = module.params.get("apply_immediately") resource = get_rds_method_attribute(method_name, module).resource - if resource == 'cluster': - identifier = module.params['db_cluster_identifier'] - updated_identifier = module.params['new_db_cluster_identifier'] - elif resource == 'instance': - identifier = module.params['db_instance_identifier'] - updated_identifier = module.params['new_db_instance_identifier'] - elif resource == 'instance_snapshot': - identifier = module.params['db_snapshot_identifier'] - elif resource == 'cluster_snapshot': - identifier = module.params['db_cluster_snapshot_identifier'] + if resource == "cluster": + identifier = module.params["db_cluster_identifier"] + updated_identifier = module.params["new_db_cluster_identifier"] + elif resource == "instance": + identifier = module.params["db_instance_identifier"] + updated_identifier = module.params["new_db_instance_identifier"] + elif resource == "instance_snapshot": + identifier = module.params["db_snapshot_identifier"] + elif resource == "cluster_snapshot": + identifier = module.params["db_cluster_snapshot_identifier"] else: - raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name)) + raise NotImplementedError( + f"method {method_name} hasn't been added to the list of accepted methods in module_utils/rds.py", + ) if not module.check_mode and updated_identifier and apply_immediately: identifier = updated_identifier return identifier def handle_errors(module, exception, method_name, parameters): - if not isinstance(exception, 
ClientError): - module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters)) + module.fail_json_aws(exception, msg=f"Unexpected failure for method {method_name} with parameters {parameters}") changed = True - error_code = exception.response['Error']['Code'] - if ( - method_name in ('modify_db_instance', 'modify_db_cluster') and - error_code == 'InvalidParameterCombination' - ): - if 'No modifications were requested' in to_text(exception): + error_code = exception.response["Error"]["Code"] + if method_name in ("modify_db_instance", "modify_db_cluster") and error_code == "InvalidParameterCombination": + if "No modifications were requested" in to_text(exception): changed = False - elif 'ModifyDbCluster API' in to_text(exception): - module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster') + elif "ModifyDbCluster API" in to_text(exception): + module.fail_json_aws( + exception, + msg="It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster", + ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) - elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState': - if 'DB Instance is not a read replica' in to_text(exception): + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) + elif method_name == "promote_read_replica" and error_code == "InvalidDBInstanceState": + if "DB Instance is not a read replica" in to_text(exception): changed = False else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) - elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault': - if 'DB Cluster that is not a read replica' in to_text(exception): + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) + elif method_name == "promote_read_replica_db_cluster" and error_code == "InvalidDBClusterStateFault": + if "DB Cluster that is not a read replica" in to_text(exception): changed = False else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue": accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"] if parameters.get("Engine") not in accepted_engines: module.fail_json_aws( - exception, msg="DB engine {0} should be one of {1}".format(parameters.get("Engine"), accepted_engines) + exception, msg=f"DB engine {parameters.get('Engine')} should be one of {accepted_engines}" ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, 
module).operation_description}", + ) return changed @@ -202,7 +251,7 @@ def call_method(client, module, method_name, parameters): result = {} changed = True if not module.check_mode: - wait = module.params.get('wait') + wait = module.params.get("wait") retry_codes = get_rds_method_attribute(method_name, module).retry_codes method = getattr(client, method_name) try: @@ -223,26 +272,26 @@ def wait_for_instance_status(client, module, db_instance_id, waiter_name): except ValueError: # using a waiter in module_utils/waiters.py waiter = get_waiter(client, waiter_name) - waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id) + waiter.wait(WaiterConfig={"Delay": 60, "MaxAttempts": 60}, DBInstanceIdentifier=db_instance_id) waiter_expected_status = { - 'db_instance_deleted': 'deleted', - 'db_instance_stopped': 'stopped', + "db_instance_deleted": "deleted", + "db_instance_stopped": "stopped", } - expected_status = waiter_expected_status.get(waiter_name, 'available') + expected_status = waiter_expected_status.get(waiter_name, "available") for _wait_attempts in range(0, 10): try: wait(client, db_instance_id, waiter_name) break except WaiterError as e: # Instance may be renamed and AWSRetry doesn't handle WaiterError - if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound': + if e.last_response.get("Error", {}).get("Code") == "DBInstanceNotFound": sleep(10) continue - module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status)) + module.fail_json_aws(e, msg=f"Error while waiting for DB instance {db_instance_id} to be {expected_status}") except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format( - db_instance_id, expected_status) + module.fail_json_aws( + e, msg=f"Unexpected error while waiting for DB instance {db_instance_id} to be {expected_status}" ) @@ -250,39 +299,44 @@ def wait_for_cluster_status(client, module, db_cluster_id, waiter_name): try: get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id) except WaiterError as e: - if waiter_name == 'cluster_deleted': - msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id) + if waiter_name == "cluster_deleted": + msg = f"Failed to wait for DB cluster {db_cluster_id} to be deleted" else: - msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id) + msg = f"Failed to wait for DB cluster {db_cluster_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id)) + module.fail_json_aws(e, msg=f"Failed with an unexpected error while waiting for the DB cluster {db_cluster_id}") def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name): try: client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id) except WaiterError as e: - if waiter_name == 'db_snapshot_deleted': - msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) + if waiter_name == "db_snapshot_deleted": + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - 
module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id)) + module.fail_json_aws( + e, msg=f"Failed with an unexpected error while waiting for the DB snapshot {db_snapshot_id}" + ) def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name): try: client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id) except WaiterError as e: - if waiter_name == 'db_cluster_snapshot_deleted': - msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id) + if waiter_name == "db_cluster_snapshot_deleted": + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id)) + module.fail_json_aws( + e, + msg=f"Failed with an unexpected error while waiting for the DB cluster snapshot {db_snapshot_id}", + ) def wait_for_status(client, module, identifier, method_name): @@ -290,39 +344,37 @@ def wait_for_status(client, module, identifier, method_name): waiter_name = rds_method_attributes.waiter resource = rds_method_attributes.resource - if resource == 'cluster': + if resource == "cluster": wait_for_cluster_status(client, module, identifier, waiter_name) - elif resource == 'instance': + elif resource == "instance": wait_for_instance_status(client, module, identifier, waiter_name) - elif resource == 'instance_snapshot': + elif resource == "instance_snapshot": wait_for_instance_snapshot_status(client, module, identifier, waiter_name) - elif resource == 'cluster_snapshot': + elif resource == "cluster_snapshot": wait_for_cluster_snapshot_status(client, module, identifier, waiter_name) def get_tags(client, module, resource_arn): try: - return boto3_tag_list_to_ansible_dict( - client.list_tags_for_resource(ResourceName=resource_arn)['TagList'] - ) + return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=resource_arn)["TagList"]) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to describe tags") def arg_spec_to_rds_params(options_dict): - tags = options_dict.pop('tags') + tags = options_dict.pop("tags") has_processor_features = False - if 'processor_features' in options_dict: + if "processor_features" in options_dict: has_processor_features = True - processor_features = options_dict.pop('processor_features') + processor_features = options_dict.pop("processor_features") camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True) for key in list(camel_options.keys()): - for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')): + for old, new in (("Db", "DB"), ("Iam", "IAM"), ("Az", "AZ"), ("Ca", "CA")): if old in key: camel_options[key.replace(old, new)] = camel_options.pop(key) - camel_options['Tags'] = tags + camel_options["Tags"] = tags if has_processor_features: - camel_options['ProcessorFeatures'] = processor_features + camel_options["ProcessorFeatures"] = processor_features return camel_options @@ -333,19 +385,23 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): changed = bool(tags_to_add or tags_to_remove) if tags_to_add: call_method( - client, module, 
method_name='add_tags_to_resource', - parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)} + client, + module, + method_name="add_tags_to_resource", + parameters={"ResourceName": resource_arn, "Tags": ansible_dict_to_boto3_tag_list(tags_to_add)}, ) if tags_to_remove: call_method( - client, module, method_name='remove_tags_from_resource', - parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove} + client, + module, + method_name="remove_tags_from_resource", + parameters={"ResourceName": resource_arn, "TagKeys": tags_to_remove}, ) return changed def compare_iam_roles(existing_roles, target_roles, purge_roles): - ''' + """ Returns differences between target and existing IAM roles Parameters: @@ -356,15 +412,15 @@ def compare_iam_roles(existing_roles, target_roles, purge_roles): Returns: roles_to_add (list): List of IAM roles to add roles_to_delete (list): List of IAM roles to delete - ''' - existing_roles = [dict((k, v) for k, v in role.items() if k != 'status') for role in existing_roles] + """ + existing_roles = [dict((k, v) for k, v in role.items() if k != "status") for role in existing_roles] roles_to_add = [role for role in target_roles if role not in existing_roles] roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else [] return roles_to_add, roles_to_remove def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove): - ''' + """ Update a DB instance's associated IAM roles Parameters: @@ -376,15 +432,11 @@ def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove) Returns: changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not - ''' + """ for role in roles_to_remove: - params = {'DBInstanceIdentifier': instance_id, - 'RoleArn': role['role_arn'], - 'FeatureName': role['feature_name']} - _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params) + params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]} + _result, changed = call_method(client, module, method_name="remove_role_from_db_instance", parameters=params) for role in roles_to_add: - params = {'DBInstanceIdentifier': instance_id, - 'RoleArn': role['role_arn'], - 'FeatureName': role['feature_name']} - _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params) + params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]} + _result, changed = call_method(client, module, method_name="add_role_to_db_instance", parameters=params) return changed diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py index 1bd214b6b..110b1c8aa 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/retries.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,11 +28,11 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
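As a side note on the compare_iam_roles()/update_iam_roles() helpers shown above in module_utils/rds.py, the diffing they perform reduces to a few lines; this standalone sketch restates the same logic with an invented account ID, role names and feature names:

    def compare_iam_roles(existing_roles, target_roles, purge_roles):
        # The read-only 'status' key is dropped so only role_arn/feature_name are compared
        existing_roles = [{k: v for k, v in role.items() if k != "status"} for role in existing_roles]
        roles_to_add = [role for role in target_roles if role not in existing_roles]
        roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else []
        return roles_to_add, roles_to_remove

    existing = [{"role_arn": "arn:aws:iam::123456789012:role/s3-import", "feature_name": "s3Import", "status": "ACTIVE"}]
    target = [{"role_arn": "arn:aws:iam::123456789012:role/lambda-invoke", "feature_name": "Lambda"}]
    to_add, to_remove = compare_iam_roles(existing, target, purge_roles=True)
    # to_add holds the lambda-invoke role; to_remove holds the s3-import role (with 'status' stripped)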
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from functools import wraps try: from botocore.exceptions import ClientError + HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False @@ -53,7 +55,7 @@ class AWSRetry(CloudRetry): @staticmethod def status_code_from_exception(error): - return error.response['Error']['Code'] + return error.response["Error"]["Code"] @staticmethod def found(response_code, catch_extra_error_codes=None): @@ -68,11 +70,51 @@ class AWSRetry(CloudRetry): # # https://github.com/boto/boto3/issues/876 (and linked PRs etc) retry_on = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling' + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", ] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) return response_code in retry_on + + +class RetryingBotoClientWrapper: + __never_wait = ( + "get_paginator", + "can_paginate", + "get_waiter", + "generate_presigned_url", + ) + + def __init__(self, client, retry): + self.client = client + self.retry = retry + + def _create_optional_retry_wrapper_function(self, unwrapped): + retrying_wrapper = self.retry(unwrapped) + + @wraps(unwrapped) + def deciding_wrapper(*args, aws_retry=False, **kwargs): + if aws_retry: + return retrying_wrapper(*args, **kwargs) + else: + return unwrapped(*args, **kwargs) + + return deciding_wrapper + + def __getattr__(self, name): + unwrapped = getattr(self.client, name) + if name in self.__never_wait: + return unwrapped + elif callable(unwrapped): + wrapped = self._create_optional_retry_wrapper_function(unwrapped) + setattr(self, name, wrapped) + return wrapped + else: + return unwrapped diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py index 3e2940a53..38e12a52d 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/route53.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py @@ -1,15 +1,14 @@ +# -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -24,9 +23,9 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags change_params = dict() if tags_to_set: - change_params['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set) + change_params["AddTags"] = ansible_dict_to_boto3_tag_list(tags_to_set) if tags_to_delete: - change_params['RemoveTagKeys'] = tags_to_delete + change_params["RemoveTagKeys"] = tags_to_delete if not change_params: return False @@ -35,14 +34,14 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags return True try: - 
client.change_tags_for_resource( - ResourceType=resource_type, - ResourceId=resource_id, - **change_params - ) + client.change_tags_for_resource(ResourceType=resource_type, ResourceId=resource_id, **change_params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type), - resource_id=resource_id, change_params=change_params) + module.fail_json_aws( + e, + msg=f"Failed to update tags on {resource_type}", + resource_id=resource_id, + change_params=change_params, + ) return True @@ -52,13 +51,15 @@ def get_tags(module, client, resource_type, resource_id): ResourceType=resource_type, ResourceId=resource_id, ) - except is_boto3_error_code('NoSuchHealthCheck'): + except is_boto3_error_code("NoSuchHealthCheck"): return {} - except is_boto3_error_code('NoSuchHostedZone'): # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchHostedZone"): # pylint: disable=duplicate-except return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type), - resource_id=resource_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Failed to fetch tags on {resource_type}", resource_id=resource_id) - tags = boto3_tag_list_to_ansible_dict(tagset['ResourceTagSet']['Tags']) + tags = boto3_tag_list_to_ansible_dict(tagset["ResourceTagSet"]["Tags"]) return tags diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py index c13c91f25..73297ffc7 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/s3.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py @@ -1,102 +1,153 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +import string +from urllib.parse import urlparse try: - from botocore.exceptions import BotoCoreError, ClientError + from hashlib import md5 + + HAS_MD5 = True except ImportError: - pass # Handled by the calling module + HAS_MD5 = False -HAS_MD5 = True try: - from hashlib import md5 + import botocore except ImportError: - try: - from md5 import md5 - except ImportError: - HAS_MD5 = False + pass # Handled by the calling module -import string +from ansible.module_utils.basic import to_text + + +def s3_head_objects(client, parts, bucket, obj, versionId): + args = {"Bucket": bucket, "Key": obj} + if versionId: + args["VersionId"] = versionId + + for part in range(1, parts + 1): + args["PartNumber"] = part + yield client.head_object(**args) + + +def calculate_checksum_with_file(client, parts, bucket, obj, versionId, filename): + digests = [] + with open(filename, "rb") as f: + for head in s3_head_objects(client, parts, bucket, obj, versionId): + digests.append(md5(f.read(int(head["ContentLength"]))).digest()) + + digest_squared = b"".join(digests) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' + + +def calculate_checksum_with_content(client, parts, bucket, obj, versionId, content): + digests = [] + offset = 0 + for head in s3_head_objects(client, parts, bucket, obj, versionId): + length = int(head["ContentLength"]) + digests.append(md5(content[offset:offset + length]).digest()) # fmt: skip + offset += length + + digest_squared = b"".join(digests) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): if not HAS_MD5: return None - if '-' in etag: + if "-" in etag: # Multi-part ETag; a hash of the hashes of each part. - parts = int(etag[1:-1].split('-')[1]) - digests = [] - - s3_kwargs = dict( - Bucket=bucket, - Key=obj, - ) - if version: - s3_kwargs['VersionId'] = version - - with open(filename, 'rb') as f: - for part_num in range(1, parts + 1): - s3_kwargs['PartNumber'] = part_num - try: - head = s3.head_object(**s3_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to get head object") - digests.append(md5(f.read(int(head['ContentLength'])))) - - digest_squared = md5(b''.join(m.digest() for m in digests)) - return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + parts = int(etag[1:-1].split("-")[1]) + try: + return calculate_checksum_with_file(s3, parts, bucket, obj, version, filename) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(module.md5(filename)) + return f'"{module.md5(filename)}"' def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None): if not HAS_MD5: return None - if '-' in etag: + if "-" in etag: # Multi-part ETag; a hash of the hashes of each part. 
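# (Worked example of the hash-of-hashes recomputed by the helpers above: for an
#  object uploaded in two parts P1 and P2, the stored ETag is
#      digests = [md5(P1).digest(), md5(P2).digest()]
#      etag = '"' + md5(b"".join(digests)).hexdigest() + '-2"'
#  that is, an MD5 over the concatenated binary per-part digests, suffixed with
#  the part count. Part sizes are illustrative; real S3 parts are at least 5 MB
#  except the last.)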
- parts = int(etag[1:-1].split('-')[1]) - digests = [] - offset = 0 - - s3_kwargs = dict( - Bucket=bucket, - Key=obj, - ) - if version: - s3_kwargs['VersionId'] = version - - for part_num in range(1, parts + 1): - s3_kwargs['PartNumber'] = part_num - try: - head = s3.head_object(**s3_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to get head object") - length = int(head['ContentLength']) - digests.append(md5(content[offset:offset + length])) - offset += length - - digest_squared = md5(b''.join(m.digest() for m in digests)) - return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + parts = int(etag[1:-1].split("-")[1]) + try: + return calculate_checksum_with_content(s3, parts, bucket, obj, version, content) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(md5(content).hexdigest()) + return f'"{md5(content).hexdigest()}"' -def validate_bucket_name(module, name): +def validate_bucket_name(name): # See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html if len(name) < 3: - module.fail_json(msg='the length of an S3 bucket must be at least 3 characters') + return "the length of an S3 bucket must be at least 3 characters" if len(name) > 63: - module.fail_json(msg='the length of an S3 bucket cannot exceed 63 characters') + return "the length of an S3 bucket cannot exceed 63 characters" legal_characters = string.ascii_lowercase + ".-" + string.digits illegal_characters = [c for c in name if c not in legal_characters] if illegal_characters: - module.fail_json(msg='invalid character(s) found in the bucket name') + return "invalid character(s) found in the bucket name" if name[-1] not in string.ascii_lowercase + string.digits: - module.fail_json(msg='bucket names must begin and end with a letter or number') - return True + return "bucket names must begin and end with a letter or number" + return None + + +# Spot special case of fakes3. 
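# (Illustration of the parsing that follows, with a hypothetical local endpoint:
#      endpoint_url="fakes3://localhost:4567" -> {"endpoint": "http://localhost:4567", "use_ssl": False}
#      endpoint_url="fakes3s://localhost"     -> {"endpoint": "https://localhost:443", "use_ssl": True}
#  a missing port falls back to 80 for fakes3 and to 443 for fakes3s.)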
+def is_fakes3(url): + """Return True if endpoint_url has scheme fakes3://""" + result = False + if url is not None: + result = urlparse(url).scheme in ("fakes3", "fakes3s") + return result + + +def parse_fakes3_endpoint(url): + fakes3 = urlparse(url) + protocol = "http" + port = fakes3.port or 80 + if fakes3.scheme == "fakes3s": + protocol = "https" + port = fakes3.port or 443 + endpoint_url = f"{protocol}://{fakes3.hostname}:{to_text(port)}" + use_ssl = bool(fakes3.scheme == "fakes3s") + return {"endpoint": endpoint_url, "use_ssl": use_ssl} + + +def parse_ceph_endpoint(url): + ceph = urlparse(url) + use_ssl = bool(ceph.scheme == "https") + return {"endpoint": url, "use_ssl": use_ssl} + + +def parse_s3_endpoint(options): + endpoint_url = options.get("endpoint_url") + if options.get("ceph"): + return False, parse_ceph_endpoint(endpoint_url) + if is_fakes3(endpoint_url): + return False, parse_fakes3_endpoint(endpoint_url) + return True, {"endpoint": endpoint_url} + + +def s3_extra_params(options, sigv4=False): + aws, extra_params = parse_s3_endpoint(options) + endpoint = extra_params["endpoint"] + if not aws: + return extra_params + dualstack = options.get("dualstack") + if not dualstack and not sigv4: + return extra_params + config = {} + if dualstack: + config["use_dualstack_endpoint"] = True + if sigv4: + config["signature_version"] = "s3v4" + extra_params["config"] = config + return extra_params diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py index 1568e4887..9201c8979 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,17 +28,13 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible.module_utils._text import to_native from ansible.module_utils._text import to_text from ansible.module_utils.six import string_types def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None): - - """ Convert a boto3 list of resource tags to a flat dict of key:value pairs + """Convert a boto3 list of resource tags to a flat dict of key:value pairs Args: tags_list (list): List of dicts representing AWS tags. 
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") @@ -60,7 +58,7 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_ if tag_name_key_name and tag_value_key_name: tag_candidates = {tag_name_key_name: tag_value_key_name} else: - tag_candidates = {'key': 'value', 'Key': 'Value'} + tag_candidates = {"key": "value", "Key": "Value"} # minio seems to return [{}] as an empty tags_list if not tags_list or not any(tag for tag in tags_list): @@ -68,12 +66,17 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_ for k, v in tag_candidates.items(): if k in tags_list[0] and v in tags_list[0]: return dict((tag[k], tag[v]) for tag in tags_list) - raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list))) + raise ValueError(f"Couldn't find tag key (candidates {str(tag_candidates)}) in tag list {str(tags_list)}") -def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'): +def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value_key_name="Value"): + """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts + + Note: booleans are converted to their Capitalized text form ("True" and "False"), this is + different to ansible_dict_to_boto3_filter_list because historically we've used "to_text()" and + AWS stores tags as strings, whereas for things which are actually booleans in AWS are returned + as lowercase strings in filters. - """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts Args: tags_dict (dict): Dict representing AWS resource tags. tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") @@ -104,8 +107,36 @@ def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value return tags_list +def _tag_name_to_filter_key(tag_name): + return f"tag:{tag_name}" + + +def ansible_dict_to_tag_filter_dict(tags_dict): + """Prepends "tag:" to all of the keys (not the values) in a dict + This is useful when you're then going to build a filter including the tags. + + Note: booleans are converted to their Capitalized text form ("True" and "False"), this is + different to ansible_dict_to_boto3_filter_list because historically we've used "to_text()" and + AWS stores tags as strings, whereas for things which are actually booleans in AWS are returned + as lowercase strings in filters. + + Args: + tags_dict (dict): Dict representing AWS resource tags. + + Basic Usage: + >>> filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags)) + + Returns: + Dict: A dictionary suitable for passing to ansible_dict_to_boto3_filter_list which can + also be combined with other common filter parameters. + """ + if not tags_dict: + return {} + return {_tag_name_to_filter_key(k): to_native(v) for k, v in tags_dict.items()} + + def boto3_tag_specifications(tags_dict, types=None): - """ Converts a list of resource types and a flat dictionary of key:value pairs representing AWS + """Converts a list of resource types and a flat dictionary of key:value pairs representing AWS resource tags to a TagSpecification object. 
https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html @@ -170,7 +201,7 @@ def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True): continue # Amazon have reserved 'aws:*' tags, we should avoid purging them as # this probably isn't what people want to do... - if key.startswith('aws:'): + if key.startswith("aws:"): continue tag_keys_to_unset.append(key) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py index dd7d9738a..24726d4c2 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/tower.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py @@ -1,9 +1,8 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - import string import textwrap @@ -12,7 +11,9 @@ from ansible.module_utils.six.moves.urllib import parse as urlparse def _windows_callback_script(passwd=None): - script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1' + script_url = ( + "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1" + ) if passwd is not None: passwd = passwd.replace("'", "''") script_tpl = """\ @@ -72,9 +73,7 @@ def _linux_callback_script(tower_address, template_id, host_config_key): exit 1 """ tpl = string.Template(textwrap.dedent(script_tpl)) - return tpl.safe_substitute(tower_address=tower_address, - template_id=template_id, - host_config_key=host_config_key) + return tpl.safe_substitute(tower_address=tower_address, template_id=template_id, host_config_key=host_config_key) def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd): diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py index 70d38cd8a..708736fc0 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,16 +28,12 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils.six import string_types from ansible.module_utils.six import integer_types +from ansible.module_utils.six import string_types def ansible_dict_to_boto3_filter_list(filters_dict): - - """ Convert an Ansible dict of filters to list of dicts that boto3 can use + """Convert an Ansible dict of filters to list of dicts that boto3 can use Args: filters_dict (dict): Dict of AWS filters. 
Basic Usage: @@ -58,15 +56,15 @@ def ansible_dict_to_boto3_filter_list(filters_dict): filters_list = [] for k, v in filters_dict.items(): - filter_dict = {'Name': k} + filter_dict = {"Name": k} if isinstance(v, bool): - filter_dict['Values'] = [str(v).lower()] + filter_dict["Values"] = [str(v).lower()] elif isinstance(v, integer_types): - filter_dict['Values'] = [str(v)] + filter_dict["Values"] = [str(v)] elif isinstance(v, string_types): - filter_dict['Values'] = [v] + filter_dict["Values"] = [v] else: - filter_dict['Values'] = v + filter_dict["Values"] = v filters_list.append(filter_dict) @@ -75,18 +73,18 @@ def ansible_dict_to_boto3_filter_list(filters_dict): def map_complex_type(complex_type, type_map): """ - Allows to cast elements within a dictionary to a specific type - Example of usage: + Allows to cast elements within a dictionary to a specific type + Example of usage: - DEPLOYMENT_CONFIGURATION_TYPE_MAP = { - 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' - } + DEPLOYMENT_CONFIGURATION_TYPE_MAP = { + 'maximum_percent': 'int', + 'minimum_healthy_percent': 'int' + } - deployment_configuration = map_complex_type(module.params['deployment_configuration'], - DEPLOYMENT_CONFIGURATION_TYPE_MAP) + deployment_configuration = map_complex_type(module.params['deployment_configuration'], + DEPLOYMENT_CONFIGURATION_TYPE_MAP) - This ensures all keys within the root element are casted and valid integers + This ensures all keys within the root element are casted and valid integers """ if complex_type is None: @@ -96,22 +94,16 @@ def map_complex_type(complex_type, type_map): for key in complex_type: if key in type_map: if isinstance(type_map[key], list): - new_type[key] = map_complex_type( - complex_type[key], - type_map[key][0]) + new_type[key] = map_complex_type(complex_type[key], type_map[key][0]) else: - new_type[key] = map_complex_type( - complex_type[key], - type_map[key]) + new_type[key] = map_complex_type(complex_type[key], type_map[key]) else: new_type[key] = complex_type[key] elif isinstance(complex_type, list): for i in range(len(complex_type)): - new_type.append(map_complex_type( - complex_type[i], - type_map)) + new_type.append(map_complex_type(complex_type[i], type_map)) elif type_map: - return globals()['__builtins__'][type_map](complex_type) + return globals()["__builtins__"][type_map](complex_type) return new_type @@ -133,7 +125,10 @@ def scrub_none_parameters(parameters, descend_into_lists=True): if isinstance(v, dict): clean_parameters[k] = scrub_none_parameters(v, descend_into_lists=descend_into_lists) elif descend_into_lists and isinstance(v, list): - clean_parameters[k] = [scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv for vv in v] + clean_parameters[k] = [ + scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv + for vv in v + ] elif v is not None: clean_parameters[k] = v diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py deleted file mode 100644 index 8011a1be9..000000000 --- a/ansible_collections/amazon/aws/plugins/module_utils/urls.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import datetime -import hashlib -import hmac -import operator - -try: - from 
boto3 import session -except ImportError: - pass - -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - -from .ec2 import HAS_BOTO3 -from .ec2 import get_aws_connection_info - -import ansible.module_utils.common.warnings as ansible_warnings - - -def hexdigest(s): - """ - Returns the sha256 hexdigest of a string after encoding. - """ - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - return hashlib.sha256(s.encode("utf-8")).hexdigest() - - -def format_querystring(params=None): - """ - Returns properly url-encoded query string from the provided params dict. - - It's specially sorted for cannonical requests - """ - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not params: - return "" - - # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name. - return urlencode(sorted(params.items(), operator.itemgetter(0))) - - -# Key derivation functions. See: -# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python -def sign(key, msg): - ''' - Return digest for key applied to msg - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.sign is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() - - -def get_signature_key(key, dateStamp, regionName, serviceName): - ''' - Returns signature key for AWS resource - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp) - kRegion = sign(kDate, regionName) - kService = sign(kRegion, serviceName) - kSigning = sign(kService, "aws4_request") - return kSigning - - -def get_aws_credentials_object(module): - ''' - Returns aws_access_key_id, aws_secret_access_key, session_token for a module. - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not HAS_BOTO3: - module.fail_json("get_aws_credentials_object requires boto3") - - dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True) - s = session.Session(**boto_params) - - return s.get_credentials() - - -# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html -def signed_request( - module=None, - method="GET", service=None, host=None, uri=None, - query=None, body="", headers=None, - session_in_header=True, session_in_query=False -): - """Generate a SigV4 request to an AWS resource for a module - - This is used if you wish to authenticate with AWS credentials to a secure endpoint like an elastisearch domain. - - Returns :class:`HTTPResponse` object. 
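# Illustrative sketch (not from the upstream commit): the SigV4 key-derivation
# chain that the deprecated sign()/get_signature_key() helpers above implement,
# using only the standard library. The secret key, date stamp, region and
# service below are dummy values.
import hashlib
import hmac

def _sign(key, msg):
    # HMAC-SHA256 of msg under key, returned as raw bytes for the next round
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

k_date = _sign(("AWS4" + "dummy-secret-key").encode("utf-8"), "20240418")
k_region = _sign(k_date, "us-west-2")
k_service = _sign(k_region, "es")
k_signing = _sign(k_service, "aws4_request")  # key that signs the string-to-sign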
- - Example: - result = signed_request( - module=this, - service="es", - host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com", - ) - - :kwarg host: endpoint to talk to - :kwarg service: AWS id of service (like `ec2` or `es`) - :kwarg module: An AnsibleAWSModule to gather connection info from - - :kwarg body: (optional) Payload to send - :kwarg method: (optional) HTTP verb to use - :kwarg query: (optional) dict of query params to handle - :kwarg uri: (optional) Resource path without query parameters - - :kwarg session_in_header: (optional) Add the session token to the headers - :kwarg session_in_query: (optional) Add the session token to the query parameters - - :returns: HTTPResponse - """ - - module.deprecate( - 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not HAS_BOTO3: - module.fail_json("A sigv4 signed_request requires boto3") - - # "Constants" - - t = datetime.datetime.utcnow() - amz_date = t.strftime("%Y%m%dT%H%M%SZ") - datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope - algorithm = "AWS4-HMAC-SHA256" - - # AWS stuff - - region, dummy, dummy = get_aws_connection_info(module, boto3=True) - credentials = get_aws_credentials_object(module) - access_key = credentials.access_key - secret_key = credentials.secret_key - session_token = credentials.token - - if not access_key: - module.fail_json(msg="aws_access_key_id is missing") - if not secret_key: - module.fail_json(msg="aws_secret_access_key is missing") - - credential_scope = "/".join([datestamp, region, service, "aws4_request"]) - - # Argument Defaults - - uri = uri or "/" - query_string = format_querystring(query) if query else "" - - headers = headers or dict() - query = query or dict() - - headers.update({ - "host": host, - "x-amz-date": amz_date, - }) - - # Handle adding of session_token if present - if session_token: - if session_in_header: - headers["X-Amz-Security-Token"] = session_token - if session_in_query: - query["X-Amz-Security-Token"] = session_token - - if method == "GET": - body = "" - - # Derived data - - body_hash = hexdigest(body) - signed_headers = ";".join(sorted(headers.keys())) - - # Setup Cannonical request to generate auth token - - cannonical_headers = "\n".join([ - key.lower().strip() + ":" + value for key, value in headers.items() - ]) + "\n" # Note additional trailing newline - - cannonical_request = "\n".join([ - method, - uri, - query_string, - cannonical_headers, - signed_headers, - body_hash, - ]) - - string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)]) - - # Sign the Cannonical request - - signing_key = get_signature_key(secret_key, datestamp, region, service) - signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() - - # Make auth header with that info - - authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format( - algorithm, access_key, credential_scope, signed_headers, signature - ) - - # PERFORM THE REQUEST! - - url = "https://" + host + uri - - if query_string != "": - url = url + "?" 
+ query_string - - final_headers = { - "x-amz-date": amz_date, - "Authorization": authorization_header, - } - - final_headers.update(headers) - - return open_url(url, method=method, data=body, headers=final_headers) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py index 8f4ca3638..444bde5d6 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/version.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py @@ -5,14 +5,6 @@ """Provide version object to compare version numbers.""" -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can -# remove the _version.py file, and replace the following import by -# -# from ansible.module_utils.compat.version import LooseVersion - -from ._version import LooseVersion # pylint: disable=unused-import +# This should be directly imported by modules, rather than importing from here. +# The import is being kept for backwards compatibility. +from ansible.module_utils.compat.version import LooseVersion # pylint: disable=unused-import diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py index 226dca920..5e1cf1071 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/waf.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Will Thames # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
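# Illustrative sketch (not from the upstream commit): version.py above now just
# re-exports LooseVersion from ansible-core's compat shim, so callers compare
# version numbers like this (the version strings are made-up examples):
from ansible.module_utils.compat.version import LooseVersion

assert LooseVersion("1.26.10") < LooseVersion("1.34.0")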
-# + """ This module adds shared support for Web Application Firewall modules """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - try: import botocore except ImportError: @@ -39,84 +38,78 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .ec2 import AWSRetry +from .retries import AWSRetry from .waiters import get_waiter - MATCH_LOOKUP = { - 'byte': { - 'method': 'byte_match_set', - 'conditionset': 'ByteMatchSet', - 'conditiontuple': 'ByteMatchTuple', - 'type': 'ByteMatch' - }, - 'geo': { - 'method': 'geo_match_set', - 'conditionset': 'GeoMatchSet', - 'conditiontuple': 'GeoMatchConstraint', - 'type': 'GeoMatch' + "byte": { + "method": "byte_match_set", + "conditionset": "ByteMatchSet", + "conditiontuple": "ByteMatchTuple", + "type": "ByteMatch", }, - 'ip': { - 'method': 'ip_set', - 'conditionset': 'IPSet', - 'conditiontuple': 'IPSetDescriptor', - 'type': 'IPMatch' + "geo": { + "method": "geo_match_set", + "conditionset": "GeoMatchSet", + "conditiontuple": "GeoMatchConstraint", + "type": "GeoMatch", }, - 'regex': { - 'method': 'regex_match_set', - 'conditionset': 'RegexMatchSet', - 'conditiontuple': 'RegexMatchTuple', - 'type': 'RegexMatch' + "ip": {"method": "ip_set", "conditionset": "IPSet", "conditiontuple": "IPSetDescriptor", "type": "IPMatch"}, + "regex": { + "method": "regex_match_set", + "conditionset": "RegexMatchSet", + "conditiontuple": "RegexMatchTuple", + "type": "RegexMatch", }, - 'size': { - 'method': 'size_constraint_set', - 'conditionset': 'SizeConstraintSet', - 'conditiontuple': 'SizeConstraint', - 'type': 'SizeConstraint' + "size": { + "method": "size_constraint_set", + "conditionset": "SizeConstraintSet", + "conditiontuple": "SizeConstraint", + "type": "SizeConstraint", }, - 'sql': { - 'method': 'sql_injection_match_set', - 'conditionset': 'SqlInjectionMatchSet', - 'conditiontuple': 'SqlInjectionMatchTuple', - 'type': 'SqlInjectionMatch', + "sql": { + "method": "sql_injection_match_set", + "conditionset": "SqlInjectionMatchSet", + "conditiontuple": "SqlInjectionMatchTuple", + "type": "SqlInjectionMatch", }, - 'xss': { - 'method': 'xss_match_set', - 'conditionset': 'XssMatchSet', - 'conditiontuple': 'XssMatchTuple', - 'type': 'XssMatch' + "xss": { + "method": "xss_match_set", + "conditionset": "XssMatchSet", + "conditiontuple": "XssMatchTuple", + "type": "XssMatch", }, } @AWSRetry.jittered_backoff(delay=5) def get_rule_with_backoff(client, rule_id): - return client.get_rule(RuleId=rule_id)['Rule'] + return client.get_rule(RuleId=rule_id)["Rule"] @AWSRetry.jittered_backoff(delay=5) def get_byte_match_set_with_backoff(client, byte_match_set_id): - return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet'] + return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)["ByteMatchSet"] @AWSRetry.jittered_backoff(delay=5) def get_ip_set_with_backoff(client, ip_set_id): - return client.get_ip_set(IPSetId=ip_set_id)['IPSet'] + return client.get_ip_set(IPSetId=ip_set_id)["IPSet"] @AWSRetry.jittered_backoff(delay=5) def get_size_constraint_set_with_backoff(client, size_constraint_set_id): - return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet'] + return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)["SizeConstraintSet"] @AWSRetry.jittered_backoff(delay=5) def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id): - return 
client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet'] + return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)["SqlInjectionMatchSet"] @AWSRetry.jittered_backoff(delay=5) def get_xss_match_set_with_backoff(client, xss_match_set_id): - return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet'] + return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)["XssMatchSet"] def get_rule(client, module, rule_id): @@ -126,24 +119,24 @@ def get_rule(client, module, rule_id): module.fail_json_aws(e, msg="Couldn't obtain waf rule") match_sets = { - 'ByteMatch': get_byte_match_set_with_backoff, - 'IPMatch': get_ip_set_with_backoff, - 'SizeConstraint': get_size_constraint_set_with_backoff, - 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff, - 'XssMatch': get_xss_match_set_with_backoff + "ByteMatch": get_byte_match_set_with_backoff, + "IPMatch": get_ip_set_with_backoff, + "SizeConstraint": get_size_constraint_set_with_backoff, + "SqlInjectionMatch": get_sql_injection_match_set_with_backoff, + "XssMatch": get_xss_match_set_with_backoff, } - if 'Predicates' in rule: - for predicate in rule['Predicates']: - if predicate['Type'] in match_sets: - predicate.update(match_sets[predicate['Type']](client, predicate['DataId'])) + if "Predicates" in rule: + for predicate in rule["Predicates"]: + if predicate["Type"] in match_sets: + predicate.update(match_sets[predicate["Type"]](client, predicate["DataId"])) # replaced by Id from the relevant MatchSet - del predicate['DataId'] + del predicate["DataId"] return rule @AWSRetry.jittered_backoff(delay=5) def get_web_acl_with_backoff(client, web_acl_id): - return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] def get_web_acl(client, module, web_acl_id): @@ -154,8 +147,8 @@ def get_web_acl(client, module, web_acl_id): if web_acl: try: - for rule in web_acl['Rules']: - rule.update(get_rule(client, module, rule['RuleId'])) + for rule in web_acl["Rules"]: + rule.update(get_rule(client, module, rule["RuleId"])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain web acl rule") return camel_dict_to_snake_dict(web_acl) @@ -163,8 +156,8 @@ def get_web_acl(client, module, web_acl_id): @AWSRetry.jittered_backoff(delay=5) def list_rules_with_backoff(client): - paginator = client.get_paginator('list_rules') - return paginator.paginate().build_full_result()['Rules'] + paginator = client.get_paginator("list_rules") + return paginator.paginate().build_full_result()["Rules"] @AWSRetry.jittered_backoff(delay=5) @@ -172,15 +165,15 @@ def list_regional_rules_with_backoff(client): resp = client.list_rules() rules = [] while resp: - rules += resp['Rules'] - resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + rules += resp["Rules"] + resp = client.list_rules(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None return rules @AWSRetry.jittered_backoff(delay=5) def list_web_acls_with_backoff(client): - paginator = client.get_paginator('list_web_acls') - return paginator.paginate().build_full_result()['WebACLs'] + paginator = client.get_paginator("list_web_acls") + return paginator.paginate().build_full_result()["WebACLs"] @AWSRetry.jittered_backoff(delay=5) @@ -188,16 +181,16 @@ def list_regional_web_acls_with_backoff(client): resp = client.list_web_acls() acls = [] while resp: - 
acls += resp['WebACLs'] - resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + acls += resp["WebACLs"] + resp = client.list_web_acls(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None return acls def list_web_acls(client, module): try: - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": return list_web_acls_with_backoff(client) - elif client.__class__.__name__ == 'WAFRegional': + elif client.__class__.__name__ == "WAFRegional": return list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain web acls") @@ -206,19 +199,18 @@ def list_web_acls(client, module): def get_change_token(client, module): try: token = client.get_change_token() - return token['ChangeToken'] + return token["ChangeToken"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain change token") -@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException']) +@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=["WAFStaleDataException"]) def run_func_with_change_token_backoff(client, module, params, func, wait=False): - params['ChangeToken'] = get_change_token(client, module) + params["ChangeToken"] = get_change_token(client, module) result = func(**params) if wait: get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=result['ChangeToken'] - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=result["ChangeToken"]) return result diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py index 2abf390cb..51d6b4568 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py @@ -1,9 +1,8 @@ +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import copy try: @@ -11,8 +10,7 @@ try: except ImportError: pass # caught by HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper - +from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper ec2_data = { "version": 2, @@ -22,37 +20,19 @@ ec2_data = { "maxAttempts": 80, "delay": 15, "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "Images[].State", - "expected": "available" - }, - { - "state": "failure", - "matcher": "pathAny", - "argument": "Images[].State", - "expected": "failed" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "Images[].State", "expected": "available"}, + {"matcher": "error", "expected": "InvalidAMIID.NotFound", "state": "retry"}, + {"state": "failure", "matcher": "pathAny", "argument": "Images[].State", "expected": "failed"}, + ], }, "InternetGatewayExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeInternetGateways", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(InternetGateways) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidInternetGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(InternetGateways) > 
`0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"}, + ], }, "InternetGatewayAttached": { "operation": "DescribeInternetGateways", @@ -63,14 +43,10 @@ ec2_data = { "expected": "available", "matcher": "pathAll", "state": "success", - "argument": "InternetGateways[].Attachments[].State" + "argument": "InternetGateways[].Attachments[].State", }, - { - "matcher": "error", - "expected": "InvalidInternetGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"}, + ], }, "NetworkInterfaceAttached": { "operation": "DescribeNetworkInterfaces", @@ -81,14 +57,10 @@ ec2_data = { "expected": "attached", "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.Status" + "argument": "NetworkInterfaces[].Attachment.Status", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "NetworkInterfaceAvailable": { "operation": "DescribeNetworkInterfaces", @@ -99,14 +71,10 @@ ec2_data = { "expected": "available", "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Status" + "argument": "NetworkInterfaces[].Status", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "retry" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "retry"}, + ], }, "NetworkInterfaceDeleted": { "operation": "DescribeNetworkInterfaces", @@ -117,20 +85,16 @@ ec2_data = { "matcher": "path", "expected": True, "argument": "length(NetworkInterfaces[]) > `0`", - "state": "retry" + "state": "retry", }, { "matcher": "path", "expected": True, "argument": "length(NetworkInterfaces[]) == `0`", - "state": "success" - }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "success" + "state": "success", }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "success"}, + ], }, "NetworkInterfaceDeleteOnTerminate": { "operation": "DescribeNetworkInterfaces", @@ -141,14 +105,10 @@ ec2_data = { "expected": True, "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "NetworkInterfaceNoDeleteOnTerminate": { "operation": "DescribeNetworkInterfaces", @@ -159,94 +119,53 @@ ec2_data = { "expected": False, "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "RouteTableExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeRouteTables", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(RouteTables[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidRouteTableID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", 
"expected": True, "argument": "length(RouteTables[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidRouteTableID.NotFound", "state": "retry"}, + ], }, "SecurityGroupExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSecurityGroups", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(SecurityGroups[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidGroup.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(SecurityGroups[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidGroup.NotFound", "state": "retry"}, + ], }, "SnapshotCompleted": { "delay": 15, "operation": "DescribeSnapshots", "maxAttempts": 40, "acceptors": [ - { - "expected": "completed", - "matcher": "pathAll", - "state": "success", - "argument": "Snapshots[].State" - } - ] + {"expected": "completed", "matcher": "pathAll", "state": "success", "argument": "Snapshots[].State"} + ], }, "SubnetAvailable": { "delay": 15, "operation": "DescribeSubnets", "maxAttempts": 40, "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "Subnets[].State" - } - ] + {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Subnets[].State"} + ], }, "SubnetExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSubnets", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(Subnets[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidSubnetID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "retry"}, + ], }, "SubnetHasMapPublic": { "delay": 5, @@ -257,9 +176,9 @@ ec2_data = { "matcher": "pathAll", "expected": True, "argument": "Subnets[].MapPublicIpOnLaunch", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetNoMapPublic": { "delay": 5, @@ -270,9 +189,9 @@ ec2_data = { "matcher": "pathAll", "expected": False, "argument": "Subnets[].MapPublicIpOnLaunch", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetHasAssignIpv6": { "delay": 5, @@ -283,9 +202,9 @@ ec2_data = { "matcher": "pathAll", "expected": True, "argument": "Subnets[].AssignIpv6AddressOnCreation", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetNoAssignIpv6": { "delay": 5, @@ -296,93 +215,53 @@ ec2_data = { "matcher": "pathAll", "expected": False, "argument": "Subnets[].AssignIpv6AddressOnCreation", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetDeleted": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSubnets", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(Subnets[]) > `0`", - "state": "retry" - }, - { - "matcher": "error", - "expected": "InvalidSubnetID.NotFound", - "state": "success" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "retry"}, + {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "success"}, + ], }, "VpcAvailable": { "delay": 15, "operation": "DescribeVpcs", "maxAttempts": 40, "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "Vpcs[].State" - } - ] + {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Vpcs[].State"} + ], }, "VpcExists": { "operation": 
"DescribeVpcs", "delay": 1, "maxAttempts": 5, "acceptors": [ - { - "matcher": "status", - "expected": 200, - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpcID.NotFound", - "state": "retry" - } - ] + {"matcher": "status", "expected": 200, "state": "success"}, + {"matcher": "error", "expected": "InvalidVpcID.NotFound", "state": "retry"}, + ], }, "VpcEndpointExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeVpcEndpoints", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(VpcEndpoints[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpcEndpointId.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(VpcEndpoints[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidVpcEndpointId.NotFound", "state": "retry"}, + ], }, "VpnGatewayExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeVpnGateways", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(VpnGateways[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpnGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(VpnGateways[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidVpnGatewayID.NotFound", "state": "retry"}, + ], }, "VpnGatewayDetached": { "delay": 5, @@ -393,47 +272,29 @@ ec2_data = { "matcher": "path", "expected": True, "argument": "VpnGateways[0].State == 'available'", - "state": "success" + "state": "success", }, - ] + ], }, "NatGatewayDeleted": { "delay": 5, "maxAttempts": 40, "operation": "DescribeNatGateways", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "expected": "deleted", - "argument": "NatGateways[].State" - }, - { - "state": "success", - "matcher": "error", - "expected": "NatGatewayNotFound" - } - ] + {"state": "success", "matcher": "pathAll", "expected": "deleted", "argument": "NatGateways[].State"}, + {"state": "success", "matcher": "error", "expected": "NatGatewayNotFound"}, + ], }, "NatGatewayAvailable": { "delay": 5, "maxAttempts": 40, "operation": "DescribeNatGateways", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "expected": "available", - "argument": "NatGateways[].State" - }, - { - "state": "retry", - "matcher": "error", - "expected": "NatGatewayNotFound" - } - ] + {"state": "success", "matcher": "pathAll", "expected": "available", "argument": "NatGateways[].State"}, + {"state": "retry", "matcher": "error", "expected": "NatGatewayNotFound"}, + ], }, - } + }, } @@ -445,20 +306,11 @@ waf_data = { "maxAttempts": 60, "operation": "GetChangeTokenStatus", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "ChangeTokenStatus == 'INSYNC'", - "state": "success" - }, - { - "matcher": "error", - "expected": "WAFInternalErrorException", - "state": "retry" - } - ] + {"matcher": "path", "expected": True, "argument": "ChangeTokenStatus == 'INSYNC'", "state": "success"}, + {"matcher": "error", "expected": "WAFInternalErrorException", "state": "retry"}, + ], } - } + }, } eks_data = { @@ -469,54 +321,27 @@ eks_data = { "maxAttempts": 60, "operation": "DescribeCluster", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "cluster.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": 
"cluster.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "ClusterDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeCluster", "acceptors": [ - { - "state": "retry", - "matcher": "path", - "argument": "cluster.status != 'DELETED'", - "expected": True - }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "retry", "matcher": "path", "argument": "cluster.status != 'DELETED'", "expected": True}, + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "FargateProfileActive": { "delay": 20, "maxAttempts": 30, "operation": "DescribeFargateProfile", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "fargateProfile.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": "fargateProfile.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "FargateProfileDeleted": { "delay": 20, @@ -527,52 +352,30 @@ eks_data = { "state": "retry", "matcher": "path", "argument": "fargateProfile.status == 'DELETING'", - "expected": True + "expected": True, }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "NodegroupActive": { "delay": 20, "maxAttempts": 60, "operation": "DescribeNodegroup", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "nodegroup.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": "nodegroup.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "NodegroupDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeNodegroup", "acceptors": [ - { - "state": "retry", - "matcher": "path", - "argument": "nodegroup.status == 'DELETING'", - "expected": True - }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] - } - } + {"state": "retry", "matcher": "path", "argument": "nodegroup.status == 'DELETING'", "expected": True}, + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], + }, + }, } @@ -585,12 +388,12 @@ elb_data = { "argument": "InstanceStates[].State", "expected": "InService", "matcher": "pathAny", - "state": "success" + "state": "success", } ], "delay": 15, "maxAttempts": 40, - "operation": "DescribeInstanceHealth" + "operation": "DescribeInstanceHealth", }, "InstanceDeregistered": { "delay": 15, @@ -601,14 +404,10 @@ elb_data = { "expected": "OutOfService", "matcher": "pathAll", "state": "success", - "argument": "InstanceStates[].State" + "argument": "InstanceStates[].State", }, - { - "matcher": "error", - "expected": "InvalidInstance", - "state": "success" - } - ] + {"matcher": "error", "expected": "InvalidInstance", "state": "success"}, + ], }, "InstanceInService": { "acceptors": [ @@ -616,17 +415,13 @@ elb_data = { "argument": "InstanceStates[].State", "expected": "InService", "matcher": "pathAll", - "state": "success" + "state": "success", }, - { - "matcher": "error", - "expected": "InvalidInstance", - "state": "retry" - } + 
{"matcher": "error", "expected": "InvalidInstance", "state": "retry"}, ], "delay": 15, "maxAttempts": 40, - "operation": "DescribeInstanceHealth" + "operation": "DescribeInstanceHealth", }, "LoadBalancerCreated": { "delay": 10, @@ -664,7 +459,7 @@ elb_data = { }, ], }, - } + }, } elbv2_data = { @@ -679,20 +474,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].State.Code", - "expected": "active" + "expected": "active", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].State.Code", - "expected": "provisioning" + "expected": "provisioning", }, - { - "state": "retry", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "retry", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancerIpAddressTypeIpv4": { "delay": 15, @@ -703,20 +494,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].IpAddressType", - "expected": "ipv4" + "expected": "ipv4", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].IpAddressType", - "expected": "dualstack" + "expected": "dualstack", }, - { - "state": "failure", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancerIpAddressTypeDualStack": { "delay": 15, @@ -727,20 +514,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].IpAddressType", - "expected": "dualstack" + "expected": "dualstack", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].IpAddressType", - "expected": "ipv4" + "expected": "ipv4", }, - { - "state": "failure", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancersDeleted": { "delay": 15, @@ -751,22 +534,31 @@ elbv2_data = { "state": "retry", "matcher": "pathAll", "argument": "LoadBalancers[].State.Code", - "expected": "active" + "expected": "active", }, - { - "matcher": "error", - "expected": "LoadBalancerNotFound", - "state": "success" - } - ] + {"matcher": "error", "expected": "LoadBalancerNotFound", "state": "success"}, + ], }, - } + }, } rds_data = { "version": 2, "waiters": { + "DBClusterPromoting": { + "delay": 5, + "maxAttempts": 60, + "operation": "DescribeDBClusters", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBClusters[].Status", + "expected": "promoting", + }, + ], + }, "DBInstanceStopped": { "delay": 20, "maxAttempts": 60, @@ -776,45 +568,27 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].DBInstanceStatus", - "expected": "stopped" + "expected": "stopped", }, - ] + ], }, "DBClusterAvailable": { "delay": 20, "maxAttempts": 60, "operation": "DescribeDBClusters", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "DBClusters[].Status", - "expected": "available" - }, - { - "state": "retry", - "matcher": "error", - "expected": "DBClusterNotFoundFault" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "available"}, + {"state": "retry", "matcher": "error", "expected": "DBClusterNotFoundFault"}, + ], }, "DBClusterDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeDBClusters", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "DBClusters[].Status", - "expected": "stopped" - }, - { - "state": 
"success", - "matcher": "error", - "expected": "DBClusterNotFoundFault" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "stopped"}, + {"state": "success", "matcher": "error", "expected": "DBClusterNotFoundFault"}, + ], }, "ReadReplicaPromoted": { "delay": 5, @@ -825,15 +599,15 @@ rds_data = { "state": "success", "matcher": "path", "argument": "length(DBInstances[].StatusInfos) == `0`", - "expected": True + "expected": True, }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].StatusInfos[].Status", - "expected": "replicating" - } - ] + "expected": "replicating", + }, + ], }, "RoleAssociated": { "delay": 5, @@ -844,15 +618,15 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "ACTIVE" + "expected": "ACTIVE", }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "PENDING" - } - ] + "expected": "PENDING", + }, + ], }, "RoleDisassociated": { "delay": 5, @@ -863,23 +637,23 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "ACTIVE" + "expected": "ACTIVE", }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "PENDING" + "expected": "PENDING", }, { "state": "success", "matcher": "path", "argument": "length(DBInstances[].AssociatedRoles[]) == `0`", - "expected": True + "expected": True, }, - ] - } - } + ], + }, + }, } @@ -891,24 +665,23 @@ route53_data = { "maxAttempts": 60, "operation": "GetChange", "acceptors": [ - { - "matcher": "path", - "expected": "INSYNC", - "argument": "ChangeInfo.Status", - "state": "success" - } - ] + {"matcher": "path", "expected": "INSYNC", "argument": "ChangeInfo.Status", "state": "success"} + ], } - } + }, } def _inject_limit_retries(model): - extra_retries = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling'] + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", + ] acceptors = [] for error in extra_retries: @@ -958,308 +731,246 @@ def route53_model(name): waiters_by_name = { - ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter( - 'image_available', - ec2_model('ImageAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_images - )), - ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter( - 'internet_gateway_exists', - ec2_model('InternetGatewayExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_internet_gateways - )), - ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter( - 'internet_gateway_attached', - ec2_model('InternetGatewayAttached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_internet_gateways - )), - ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter( - 'network_interface_attached', - ec2_model('NetworkInterfaceAttached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter( - 'network_interface_deleted', - ec2_model('NetworkInterfaceDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter( - 'network_interface_available', - 
ec2_model('NetworkInterfaceAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter( - 'network_interface_delete_on_terminate', - ec2_model('NetworkInterfaceDeleteOnTerminate'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter( - 'network_interface_no_delete_on_terminate', - ec2_model('NetworkInterfaceNoDeleteOnTerminate'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter( - 'route_table_exists', - ec2_model('RouteTableExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_route_tables - )), - ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter( - 'security_group_exists', - ec2_model('SecurityGroupExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_security_groups - )), - ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter( - 'snapshot_completed', - ec2_model('SnapshotCompleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_snapshots - )), - ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter( - 'subnet_available', - ec2_model('SubnetAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter( - 'subnet_exists', - ec2_model('SubnetExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter( - 'subnet_has_map_public', - ec2_model('SubnetHasMapPublic'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter( - 'subnet_no_map_public', - ec2_model('SubnetNoMapPublic'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter( - 'subnet_has_assign_ipv6', - ec2_model('SubnetHasAssignIpv6'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter( - 'subnet_no_assign_ipv6', - ec2_model('SubnetNoAssignIpv6'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter( - 'subnet_deleted', - ec2_model('SubnetDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter( - 'vpc_available', - ec2_model('VpcAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpcs - )), - ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter( - 'vpc_exists', - ec2_model('VpcExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpcs - )), - ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter( - 'vpc_endpoint_exists', - ec2_model('VpcEndpointExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpc_endpoints - )), - ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter( - 'vpn_gateway_exists', - ec2_model('VpnGatewayExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpn_gateways - )), - ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter( - 'vpn_gateway_detached', - ec2_model('VpnGatewayDetached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpn_gateways - )), - ('EC2', 
'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter( - 'nat_gateway_deleted', - ec2_model('NatGatewayDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_nat_gateways - )), - ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter( - 'nat_gateway_available', - ec2_model('NatGatewayAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_nat_gateways - )), - ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( - 'change_token_in_sync', - waf_model('ChangeTokenInSync'), - core_waiter.NormalizedOperationMethod( - waf.get_change_token_status - )), - ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( - 'change_token_in_sync', - waf_model('ChangeTokenInSync'), - core_waiter.NormalizedOperationMethod( - waf.get_change_token_status - )), - ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter( - 'cluster_active', - eks_model('ClusterActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_cluster - )), - ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter( - 'cluster_deleted', - eks_model('ClusterDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_cluster - )), - ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter( - 'fargate_profile_active', - eks_model('FargateProfileActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_fargate_profile - )), - ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter( - 'fargate_profile_deleted', - eks_model('FargateProfileDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_fargate_profile - )), - ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter( - 'nodegroup_active', - eks_model('NodegroupActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_nodegroup - )), - ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter( - 'nodegroup_deleted', - eks_model('NodegroupDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_nodegroup - )), - ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter( - 'any_instance_in_service', - elb_model('AnyInstanceInService'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter( - 'instance_deregistered', - elb_model('InstanceDeregistered'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter( - 'load_balancer_created', - elb_model('InstanceInService'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter( - 'load_balancer_created', - elb_model('LoadBalancerCreated'), - core_waiter.NormalizedOperationMethod( - elb.describe_load_balancers - )), - ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter( - 'load_balancer_deleted', - elb_model('LoadBalancerDeleted'), - core_waiter.NormalizedOperationMethod( - elb.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter( - 'load_balancer_available', - elbv2_model('LoadBalancerAvailable'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter( - 'load_balancer_ip_address_type_ipv4', - elbv2_model('LoadBalancerIpAddressTypeIpv4'), - 
core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter( - 'load_balancers_ip_address_type_dualstack', - elbv2_model('LoadBalancerIpAddressTypeDualStack'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter( - 'load_balancers_deleted', - elbv2_model('LoadBalancersDeleted'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter( - 'db_instance_stopped', - rds_model('DBInstanceStopped'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter( - 'cluster_available', - rds_model('DBClusterAvailable'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_clusters - )), - ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter( - 'cluster_deleted', - rds_model('DBClusterDeleted'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_clusters - )), - ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter( - 'read_replica_promoted', - rds_model('ReadReplicaPromoted'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter( - 'role_associated', - rds_model('RoleAssociated'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter( - 'role_disassociated', - rds_model('RoleDisassociated'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter( - 'resource_record_sets_changed', - route53_model('ResourceRecordSetsChanged'), - core_waiter.NormalizedOperationMethod( - route53.get_change - )), + ("EC2", "image_available"): lambda ec2: core_waiter.Waiter( + "image_available", ec2_model("ImageAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_images) + ), + ("EC2", "internet_gateway_exists"): lambda ec2: core_waiter.Waiter( + "internet_gateway_exists", + ec2_model("InternetGatewayExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways), + ), + ("EC2", "internet_gateway_attached"): lambda ec2: core_waiter.Waiter( + "internet_gateway_attached", + ec2_model("InternetGatewayAttached"), + core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways), + ), + ("EC2", "network_interface_attached"): lambda ec2: core_waiter.Waiter( + "network_interface_attached", + ec2_model("NetworkInterfaceAttached"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_deleted"): lambda ec2: core_waiter.Waiter( + "network_interface_deleted", + ec2_model("NetworkInterfaceDeleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_available"): lambda ec2: core_waiter.Waiter( + "network_interface_available", + ec2_model("NetworkInterfaceAvailable"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_delete_on_terminate"): lambda ec2: core_waiter.Waiter( + "network_interface_delete_on_terminate", + ec2_model("NetworkInterfaceDeleteOnTerminate"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + 
("EC2", "network_interface_no_delete_on_terminate"): lambda ec2: core_waiter.Waiter( + "network_interface_no_delete_on_terminate", + ec2_model("NetworkInterfaceNoDeleteOnTerminate"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "route_table_exists"): lambda ec2: core_waiter.Waiter( + "route_table_exists", + ec2_model("RouteTableExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_route_tables), + ), + ("EC2", "security_group_exists"): lambda ec2: core_waiter.Waiter( + "security_group_exists", + ec2_model("SecurityGroupExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_security_groups), + ), + ("EC2", "snapshot_completed"): lambda ec2: core_waiter.Waiter( + "snapshot_completed", + ec2_model("SnapshotCompleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_snapshots), + ), + ("EC2", "subnet_available"): lambda ec2: core_waiter.Waiter( + "subnet_available", ec2_model("SubnetAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "subnet_exists"): lambda ec2: core_waiter.Waiter( + "subnet_exists", ec2_model("SubnetExists"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "subnet_has_map_public"): lambda ec2: core_waiter.Waiter( + "subnet_has_map_public", + ec2_model("SubnetHasMapPublic"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_no_map_public"): lambda ec2: core_waiter.Waiter( + "subnet_no_map_public", + ec2_model("SubnetNoMapPublic"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_has_assign_ipv6"): lambda ec2: core_waiter.Waiter( + "subnet_has_assign_ipv6", + ec2_model("SubnetHasAssignIpv6"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_no_assign_ipv6"): lambda ec2: core_waiter.Waiter( + "subnet_no_assign_ipv6", + ec2_model("SubnetNoAssignIpv6"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_deleted"): lambda ec2: core_waiter.Waiter( + "subnet_deleted", ec2_model("SubnetDeleted"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "vpc_available"): lambda ec2: core_waiter.Waiter( + "vpc_available", ec2_model("VpcAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs) + ), + ("EC2", "vpc_exists"): lambda ec2: core_waiter.Waiter( + "vpc_exists", ec2_model("VpcExists"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs) + ), + ("EC2", "vpc_endpoint_exists"): lambda ec2: core_waiter.Waiter( + "vpc_endpoint_exists", + ec2_model("VpcEndpointExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpc_endpoints), + ), + ("EC2", "vpn_gateway_exists"): lambda ec2: core_waiter.Waiter( + "vpn_gateway_exists", + ec2_model("VpnGatewayExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways), + ), + ("EC2", "vpn_gateway_detached"): lambda ec2: core_waiter.Waiter( + "vpn_gateway_detached", + ec2_model("VpnGatewayDetached"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways), + ), + ("EC2", "nat_gateway_deleted"): lambda ec2: core_waiter.Waiter( + "nat_gateway_deleted", + ec2_model("NatGatewayDeleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways), + ), + ("EC2", "nat_gateway_available"): lambda ec2: core_waiter.Waiter( + "nat_gateway_available", + ec2_model("NatGatewayAvailable"), + core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways), + ), + ("WAF", "change_token_in_sync"): lambda waf: 
core_waiter.Waiter( + "change_token_in_sync", + waf_model("ChangeTokenInSync"), + core_waiter.NormalizedOperationMethod(waf.get_change_token_status), + ), + ("WAFRegional", "change_token_in_sync"): lambda waf: core_waiter.Waiter( + "change_token_in_sync", + waf_model("ChangeTokenInSync"), + core_waiter.NormalizedOperationMethod(waf.get_change_token_status), + ), + ("EKS", "cluster_active"): lambda eks: core_waiter.Waiter( + "cluster_active", eks_model("ClusterActive"), core_waiter.NormalizedOperationMethod(eks.describe_cluster) + ), + ("EKS", "cluster_deleted"): lambda eks: core_waiter.Waiter( + "cluster_deleted", eks_model("ClusterDeleted"), core_waiter.NormalizedOperationMethod(eks.describe_cluster) + ), + ("EKS", "fargate_profile_active"): lambda eks: core_waiter.Waiter( + "fargate_profile_active", + eks_model("FargateProfileActive"), + core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile), + ), + ("EKS", "fargate_profile_deleted"): lambda eks: core_waiter.Waiter( + "fargate_profile_deleted", + eks_model("FargateProfileDeleted"), + core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile), + ), + ("EKS", "nodegroup_active"): lambda eks: core_waiter.Waiter( + "nodegroup_active", eks_model("NodegroupActive"), core_waiter.NormalizedOperationMethod(eks.describe_nodegroup) + ), + ("EKS", "nodegroup_deleted"): lambda eks: core_waiter.Waiter( + "nodegroup_deleted", + eks_model("NodegroupDeleted"), + core_waiter.NormalizedOperationMethod(eks.describe_nodegroup), + ), + ("ElasticLoadBalancing", "any_instance_in_service"): lambda elb: core_waiter.Waiter( + "any_instance_in_service", + elb_model("AnyInstanceInService"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "instance_deregistered"): lambda elb: core_waiter.Waiter( + "instance_deregistered", + elb_model("InstanceDeregistered"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "instance_in_service"): lambda elb: core_waiter.Waiter( + "load_balancer_created", + elb_model("InstanceInService"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "load_balancer_created"): lambda elb: core_waiter.Waiter( + "load_balancer_created", + elb_model("LoadBalancerCreated"), + core_waiter.NormalizedOperationMethod(elb.describe_load_balancers), + ), + ("ElasticLoadBalancing", "load_balancer_deleted"): lambda elb: core_waiter.Waiter( + "load_balancer_deleted", + elb_model("LoadBalancerDeleted"), + core_waiter.NormalizedOperationMethod(elb.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_available"): lambda elbv2: core_waiter.Waiter( + "load_balancer_available", + elbv2_model("LoadBalancerAvailable"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_ipv4"): lambda elbv2: core_waiter.Waiter( + "load_balancer_ip_address_type_ipv4", + elbv2_model("LoadBalancerIpAddressTypeIpv4"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_dualstack"): lambda elbv2: core_waiter.Waiter( + "load_balancers_ip_address_type_dualstack", + elbv2_model("LoadBalancerIpAddressTypeDualStack"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancers_deleted"): lambda elbv2: core_waiter.Waiter( + "load_balancers_deleted", + 
elbv2_model("LoadBalancersDeleted"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("RDS", "db_cluster_promoting"): lambda rds: core_waiter.Waiter( + "db_cluster_promoting", + rds_model("DBClusterPromoting"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "db_instance_stopped"): lambda rds: core_waiter.Waiter( + "db_instance_stopped", + rds_model("DBInstanceStopped"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("RDS", "cluster_available"): lambda rds: core_waiter.Waiter( + "cluster_available", + rds_model("DBClusterAvailable"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "cluster_deleted"): lambda rds: core_waiter.Waiter( + "cluster_deleted", + rds_model("DBClusterDeleted"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "read_replica_promoted"): lambda rds: core_waiter.Waiter( + "read_replica_promoted", + rds_model("ReadReplicaPromoted"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("RDS", "role_associated"): lambda rds: core_waiter.Waiter( + "role_associated", rds_model("RoleAssociated"), core_waiter.NormalizedOperationMethod(rds.describe_db_instances) + ), + ("RDS", "role_disassociated"): lambda rds: core_waiter.Waiter( + "role_disassociated", + rds_model("RoleDisassociated"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("Route53", "resource_record_sets_changed"): lambda route53: core_waiter.Waiter( + "resource_record_sets_changed", + route53_model("ResourceRecordSetsChanged"), + core_waiter.NormalizedOperationMethod(route53.get_change), + ), } def get_waiter(client, waiter_name): - if isinstance(client, _RetryingBotoClientWrapper): + if isinstance(client, RetryingBotoClientWrapper): return get_waiter(client.client, waiter_name) try: return waiters_by_name[(client.__class__.__name__, waiter_name)](client) except KeyError: - raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format( - waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys()))) + available_waiters = ", ".join(repr(k) for k in waiters_by_name.keys()) + raise NotImplementedError( + f"Waiter {waiter_name} could not be found for client {type(client)}. 
Available waiters: {available_waiters}" + ) diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py index aefe46570..fcd89b467 100644 --- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_group version_added: 5.0.0 @@ -335,23 +333,23 @@ options: type: list elements: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Basic configuration with Launch Configuration - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - environment: production propagate_at_launch: false @@ -398,8 +396,8 @@ EXAMPLES = r''' health_check_period: 60 health_check_type: ELB replace_instances: - - i-b345231 - - i-24c2931 + - i-b345231 + - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 @@ -409,16 +407,16 @@ EXAMPLES = r''' - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - environment: production propagate_at_launch: false @@ -427,30 +425,30 @@ EXAMPLES = r''' - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' mixed_instances_policy: - instance_types: - - t3a.large - - t3.large - - t2.large - instances_distribution: - on_demand_percentage_above_base_capacity: 0 - spot_allocation_strategy: capacity-optimized + instance_types: + - t3a.large + - t3.large + - t2.large + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - 
environment: production propagate_at_launch: false -''' +""" -RETURN = r''' +RETURN = r""" --- auto_scaling_group_name: description: The unique name of the auto scaling group @@ -652,7 +650,7 @@ metrics_collection: "Metric": "GroupInServiceInstances" } ] -''' +""" import time @@ -662,34 +660,47 @@ except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', - 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', - 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize', - 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies', - 'VPCZoneIdentifier') - -INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +ASG_ATTRIBUTES = ( + "AvailabilityZones", + "DefaultCooldown", + "DesiredCapacity", + "HealthCheckGracePeriod", + "HealthCheckType", + "LaunchConfigurationName", + "LoadBalancerNames", + "MaxInstanceLifetime", + "MaxSize", + "MinSize", + "AutoScalingGroupName", + "PlacementGroup", + "TerminationPolicies", + "VPCZoneIdentifier", +) + +INSTANCE_ATTRIBUTES = ("instance_id", "health_status", "lifecycle_state", "launch_config_name") backoff_params = dict(retries=10, delay=3, backoff=1.5) @AWSRetry.jittered_backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): - pg = connection.get_paginator('describe_auto_scaling_groups') - return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) + pg = connection.get_paginator("describe_auto_scaling_groups") + return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get("AutoScalingGroups", []) @AWSRetry.jittered_backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): - connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) + connection.deregister_instances_from_load_balancer( + LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)] + ) @AWSRetry.jittered_backoff(**backoff_params) @@ -717,24 +728,24 @@ def resume_asg_processes(connection, asg_name, processes): @AWSRetry.jittered_backoff(**backoff_params) def describe_launch_configurations(connection, launch_config_name): - pg = connection.get_paginator('describe_launch_configurations') + pg = connection.get_paginator("describe_launch_configurations") 
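
[Editor's note, not part of the patch] The @AWSRetry.jittered_backoff(**backoff_params) decorators above wrap each AWS call with exponential backoff plus jitter (base delay 3s, factor 1.5, up to 10 retries) so throttling errors are retried rather than fatal. The same pattern in isolation, with a hypothetical helper name:

    from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

    # Hypothetical helper mirroring backoff_params above; retryable errors are
    # re-issued with delays of roughly 3s, 4.5s, 6.75s, ... (jittered).
    @AWSRetry.jittered_backoff(retries=10, delay=3, backoff=1.5)
    def describe_asgs_once(connection, group_name):
        return connection.describe_auto_scaling_groups(AutoScalingGroupNames=[group_name])
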
return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() @AWSRetry.jittered_backoff(**backoff_params) def describe_launch_templates(connection, launch_template): - if launch_template['launch_template_id'] is not None: + if launch_template["launch_template_id"] is not None: try: - lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) + lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template["launch_template_id"]]) return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): + module.fail_json(msg=f"No launch template found matching: {launch_template}") else: try: - lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) + lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template["launch_template_name"]]) return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): + module.fail_json(msg=f"No launch template found matching: {launch_template}") @AWSRetry.jittered_backoff(**backoff_params) @@ -745,18 +756,13 @@ def create_asg(connection, **params): @AWSRetry.jittered_backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn, - NotificationTypes=notification_types + AutoScalingGroupName=asg_name, TopicARN=topic_arn, NotificationTypes=notification_types ) @AWSRetry.jittered_backoff(**backoff_params) def del_notification_config(connection, asg_name, topic_arn): - connection.delete_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn - ) + connection.delete_notification_configuration(AutoScalingGroupName=asg_name, TopicARN=topic_arn) @AWSRetry.jittered_backoff(**backoff_params) @@ -784,35 +790,37 @@ def update_asg(connection, **params): connection.update_auto_scaling_group(**params) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["ScalingActivityInProgress"], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) @AWSRetry.jittered_backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): - connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, - ShouldDecrementDesiredCapacity=decrement_capacity) + connection.terminate_instance_in_auto_scaling_group( + InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity + ) @AWSRetry.jittered_backoff(**backoff_params) def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity): - connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, - ShouldDecrementDesiredCapacity=decrement_capacity) + connection.detach_instances( + InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, ShouldDecrementDesiredCapacity=decrement_capacity + ) def enforce_required_arguments_for_create(): - ''' As many arguments are not 
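
[Editor's note, not part of the patch] is_boto3_error_code(), used in the except clauses of describe_launch_templates() above, returns an exception class that matches only botocore ClientErrors carrying that specific error code, which is what lets "not found" be separated from other failures. A minimal sketch (the helper name is illustrative):

    from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code

    # Hypothetical helper: True/False for one specific error code; any other
    # ClientError propagates to the caller unchanged.
    def launch_template_exists(ec2, name):
        try:
            ec2.describe_launch_templates(LaunchTemplateNames=[name])
            return True
        except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"):
            return False
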
required for autoscale group deletion - they cannot be mandatory arguments for the module, so we enforce - them here ''' + """As many arguments are not required for autoscale group deletion + they cannot be mandatory arguments for the module, so we enforce + them here""" missing_args = [] - if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: + if module.params.get("launch_config_name") is None and module.params.get("launch_template") is None: module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") - for arg in ('min_size', 'max_size'): + for arg in ("min_size", "max_size"): if module.params[arg] is None: missing_args.append(arg) if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) + module.fail_json(msg=f"Missing required arguments for autoscaling group create: {','.join(missing_args)}") def get_properties(autoscaling_group): @@ -822,71 +830,73 @@ def get_properties(autoscaling_group): unhealthy_instances=0, pending_instances=0, viable_instances=0, - terminating_instances=0 + terminating_instances=0, ) instance_facts = dict() - autoscaling_group_instances = autoscaling_group.get('Instances') + autoscaling_group_instances = autoscaling_group.get("Instances") if autoscaling_group_instances: - properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] + properties["instances"] = [i["InstanceId"] for i in autoscaling_group_instances] for i in autoscaling_group_instances: - instance_facts[i['InstanceId']] = { - 'health_status': i['HealthStatus'], - 'lifecycle_state': i['LifecycleState'] + instance_facts[i["InstanceId"]] = { + "health_status": i["HealthStatus"], + "lifecycle_state": i["LifecycleState"], } - if 'LaunchConfigurationName' in i: - instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName'] - elif 'LaunchTemplate' in i: - instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate'] + if "LaunchConfigurationName" in i: + instance_facts[i["InstanceId"]]["launch_config_name"] = i["LaunchConfigurationName"] + elif "LaunchTemplate" in i: + instance_facts[i["InstanceId"]]["launch_template"] = i["LaunchTemplate"] - if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': - properties['viable_instances'] += 1 + if i["HealthStatus"] == "Healthy" and i["LifecycleState"] == "InService": + properties["viable_instances"] += 1 - if i['HealthStatus'] == 'Healthy': - properties['healthy_instances'] += 1 + if i["HealthStatus"] == "Healthy": + properties["healthy_instances"] += 1 else: - properties['unhealthy_instances'] += 1 - - if i['LifecycleState'] == 'InService': - properties['in_service_instances'] += 1 - if i['LifecycleState'] == 'Terminating': - properties['terminating_instances'] += 1 - if i['LifecycleState'] == 'Pending': - properties['pending_instances'] += 1 + properties["unhealthy_instances"] += 1 + + if i["LifecycleState"] == "InService": + properties["in_service_instances"] += 1 + if i["LifecycleState"] == "Terminating": + properties["terminating_instances"] += 1 + if i["LifecycleState"] == "Pending": + properties["pending_instances"] += 1 else: - properties['instances'] = [] - - properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') - properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') - properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') - 
properties['created_time'] = autoscaling_group.get('CreatedTime') - properties['instance_facts'] = instance_facts - properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') - if 'LaunchConfigurationName' in autoscaling_group: - properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') + properties["instances"] = [] + + properties["auto_scaling_group_name"] = autoscaling_group.get("AutoScalingGroupName") + properties["auto_scaling_group_arn"] = autoscaling_group.get("AutoScalingGroupARN") + properties["availability_zones"] = autoscaling_group.get("AvailabilityZones") + properties["created_time"] = autoscaling_group.get("CreatedTime") + properties["instance_facts"] = instance_facts + properties["load_balancers"] = autoscaling_group.get("LoadBalancerNames") + if "LaunchConfigurationName" in autoscaling_group: + properties["launch_config_name"] = autoscaling_group.get("LaunchConfigurationName") else: - properties['launch_template'] = autoscaling_group.get('LaunchTemplate') - properties['tags'] = autoscaling_group.get('Tags') - properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime') - properties['min_size'] = autoscaling_group.get('MinSize') - properties['max_size'] = autoscaling_group.get('MaxSize') - properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod') - properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['termination_policies'] = autoscaling_group.get('TerminationPolicies') - properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs') - properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') - raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy') + properties["launch_template"] = autoscaling_group.get("LaunchTemplate") + properties["tags"] = autoscaling_group.get("Tags") + properties["max_instance_lifetime"] = autoscaling_group.get("MaxInstanceLifetime") + properties["min_size"] = autoscaling_group.get("MinSize") + properties["max_size"] = autoscaling_group.get("MaxSize") + properties["desired_capacity"] = autoscaling_group.get("DesiredCapacity") + properties["default_cooldown"] = autoscaling_group.get("DefaultCooldown") + properties["healthcheck_grace_period"] = autoscaling_group.get("HealthCheckGracePeriod") + properties["healthcheck_type"] = autoscaling_group.get("HealthCheckType") + properties["default_cooldown"] = autoscaling_group.get("DefaultCooldown") + properties["termination_policies"] = autoscaling_group.get("TerminationPolicies") + properties["target_group_arns"] = autoscaling_group.get("TargetGroupARNs") + properties["vpc_zone_identifier"] = autoscaling_group.get("VPCZoneIdentifier") + raw_mixed_instance_object = autoscaling_group.get("MixedInstancesPolicy") if raw_mixed_instance_object: - properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object) - properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')] + properties["mixed_instances_policy_full"] = camel_dict_to_snake_dict(raw_mixed_instance_object) + properties["mixed_instances_policy"] = [ + x["InstanceType"] for x in raw_mixed_instance_object.get("LaunchTemplate").get("Overrides") + ] - metrics 
= autoscaling_group.get('EnabledMetrics') + metrics = autoscaling_group.get("EnabledMetrics") if metrics: metrics.sort(key=lambda x: x["Metric"]) - properties['metrics_collection'] = metrics + properties["metrics_collection"] = metrics if properties["target_group_arns"]: elbv2_connection = module.client("elbv2") @@ -897,7 +907,7 @@ def get_properties(autoscaling_group): tg_chunks = [ properties["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(properties["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]]) @@ -909,9 +919,9 @@ def get_properties(autoscaling_group): def get_launch_object(connection, ec2_connection): launch_object = dict() - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') + launch_config_name = module.params.get("launch_config_name") + launch_template = module.params.get("launch_template") + mixed_instances_policy = module.params.get("mixed_instances_policy") if launch_config_name is None and launch_template is None: return launch_object elif launch_config_name: @@ -919,64 +929,71 @@ def get_launch_object(connection, ec2_connection): launch_configs = describe_launch_configurations(connection, launch_config_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configurations") - if len(launch_configs['LaunchConfigurations']) == 0: - module.fail_json(msg="No launch config found with name %s" % launch_config_name) - launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} + if len(launch_configs["LaunchConfigurations"]) == 0: + module.fail_json(msg=f"No launch config found with name {launch_config_name}") + launch_object = { + "LaunchConfigurationName": launch_configs["LaunchConfigurations"][0]["LaunchConfigurationName"] + } return launch_object elif launch_template: - lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] - if launch_template['version'] is not None: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} + lt = describe_launch_templates(ec2_connection, launch_template)["LaunchTemplates"][0] + if launch_template["version"] is not None: + launch_object = { + "LaunchTemplate": {"LaunchTemplateId": lt["LaunchTemplateId"], "Version": launch_template["version"]} + } else: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} - - if mixed_instances_policy: - instance_types = mixed_instances_policy.get('instance_types', []) - instances_distribution = mixed_instances_policy.get('instances_distribution', {}) - policy = { - 'LaunchTemplate': { - 'LaunchTemplateSpecification': launch_object['LaunchTemplate'] + launch_object = { + "LaunchTemplate": { + "LaunchTemplateId": lt["LaunchTemplateId"], + "Version": str(lt["LatestVersionNumber"]), } } + + if mixed_instances_policy: + instance_types = mixed_instances_policy.get("instance_types", []) + instances_distribution = mixed_instances_policy.get("instances_distribution", {}) + policy = {"LaunchTemplate": {"LaunchTemplateSpecification": 
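
[Editor's note, not part of the patch] The tg_chunks slicing in get_properties() above batches target group ARNs because DescribeTargetGroups accepts a limited number of ARNs per call (20, to my knowledge; tg_chunk_size is set elsewhere in the module and not visible in this hunk). The slicing pattern in isolation:

    # Sketch of the chunking above: split a list into batches of at most `size`.
    def chunk(seq, size=20):
        return [seq[i:i + size] for i in range(0, len(seq), size)]

    # Placeholder ARNs purely to show the batch sizes.
    arns = [f"arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/tg-{n}/abc" for n in range(45)]
    assert [len(c) for c in chunk(arns)] == [20, 20, 5]
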
launch_object["LaunchTemplate"]}} if instance_types: - policy['LaunchTemplate']['Overrides'] = [] + policy["LaunchTemplate"]["Overrides"] = [] for instance_type in instance_types: - instance_type_dict = {'InstanceType': instance_type} - policy['LaunchTemplate']['Overrides'].append(instance_type_dict) + instance_type_dict = {"InstanceType": instance_type} + policy["LaunchTemplate"]["Overrides"].append(instance_type_dict) if instances_distribution: instances_distribution_params = scrub_none_parameters(instances_distribution) - policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True) - launch_object['MixedInstancesPolicy'] = policy + policy["InstancesDistribution"] = snake_dict_to_camel_dict( + instances_distribution_params, capitalize_first=True + ) + launch_object["MixedInstancesPolicy"] = policy return launch_object def elb_dreg(asg_connection, group_name, instance_id): as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") count = 1 - if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': - elb_connection = module.client('elb') + if as_group["LoadBalancerNames"] and as_group["HealthCheckType"] == "ELB": + elb_connection = module.client("elb") else: return - for lb in as_group['LoadBalancerNames']: + for lb in as_group["LoadBalancerNames"]: deregister_lb_instances(elb_connection, lb, instance_id) - module.debug("De-registering %s from ELB %s" % (instance_id, lb)) + module.debug(f"De-registering {instance_id} from ELB {lb}") wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 - for lb in as_group['LoadBalancerNames']: + for lb in as_group["LoadBalancerNames"]: lb_instances = describe_instance_health(elb_connection, lb, []) - for i in lb_instances['InstanceStates']: - if i['InstanceId'] == instance_id and i['State'] == "InService": + for i in lb_instances["InstanceStates"]: + if i["InstanceId"] == instance_id and i["State"] == "InService": count += 1 - module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description'])) + module.debug(f"{i['InstanceId']}: {i['State']}, {i['Description']}") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime())) + module.fail_json(msg=f"Waited too long for instance to deregister. 
{time.asctime()}") def elb_healthy(asg_connection, elb_connection, group_name): @@ -985,26 +1002,29 @@ def elb_healthy(asg_connection, elb_connection, group_name): props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + for instance, settings in props["instance_facts"].items(): + if settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(InstanceId=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("ELB instance status:") lb_instances = list() - for lb in as_group.get('LoadBalancerNames'): + for lb in as_group.get("LoadBalancerNames"): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: lb_instances = describe_instance_health(elb_connection, lb, instances) - except is_boto3_error_code('InvalidInstance'): + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get load balancer.") - for i in lb_instances.get('InstanceStates'): - if i['State'] == "InService": - healthy_instances.add(i['InstanceId']) - module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State'])) + for i in lb_instances.get("InstanceStates"): + if i["State"] == "InService": + healthy_instances.add(i["InstanceId"]) + module.debug(f"ELB Health State {i['InstanceId']}: {i['State']}") return len(healthy_instances) @@ -1014,82 +1034,85 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + for instance, settings in props["instance_facts"].items(): + if settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(Id=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("Target Group instance status:") tg_instances = list() - for tg in as_group.get('TargetGroupARNs'): + for tg in as_group.get("TargetGroupARNs"): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: tg_instances = describe_target_health(elbv2_connection, tg, instances) - except is_boto3_error_code('InvalidInstance'): + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get target group.") - for i in tg_instances.get('TargetHealthDescriptions'): - if i['TargetHealth']['State'] == "healthy": - 
healthy_instances.add(i['Target']['Id']) - module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State'])) + for i in tg_instances.get("TargetHealthDescriptions"): + if i["TargetHealth"]["State"] == "healthy": + healthy_instances.add(i["Target"]["Id"]) + module.debug(f"Target Group Health State {i['Target']['Id']}: {i['TargetHealth']['State']}") return len(healthy_instances) def wait_for_elb(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB': + if as_group.get("LoadBalancerNames") and as_group.get("HealthCheckType") == "ELB": module.debug("Waiting for ELB to consider instances healthy.") - elb_connection = module.client('elb') + elb_connection = module.client("elb") wait_timeout = time.time() + wait_timeout healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): + while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - module.debug("ELB thinks %s instances are healthy." % healthy_instances) + module.debug(f"ELB thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances) + module.fail_json(msg=f"Waited too long for ELB instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. ELB thinks {healthy_instances} instances are healthy.") def wait_for_target_group(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB': + if as_group.get("TargetGroupARNs") and as_group.get("HealthCheckType") == "ELB": module.debug("Waiting for Target Group to consider instances healthy.") - elbv2_connection = module.client('elbv2') + elbv2_connection = module.client("elbv2") wait_timeout = time.time() + wait_timeout healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): + while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - module.debug("Target Group thinks %s instances are healthy." % healthy_instances) + module.debug(f"Target Group thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. Target Group thinks %s instances are healthy." 
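
[Editor's note, not part of the patch] wait_for_elb() and wait_for_target_group() above share the same poll-with-deadline shape: re-check health every 10 seconds until enough instances are healthy or the timeout passes, then fail loudly. Reduced to a skeleton (check_health is a stand-in for elb_healthy/tg_healthy):

    import time

    # Skeleton of the wait loops above; check_health() is a placeholder callable
    # returning the current count of healthy instances.
    def wait_until_healthy(check_health, min_size, wait_timeout):
        deadline = time.time() + wait_timeout
        healthy = check_health()
        while healthy < min_size and time.time() < deadline:
            healthy = check_health()
            time.sleep(10)
        if time.time() >= deadline:
            raise TimeoutError(f"Waited too long; only {healthy} healthy at {time.asctime()}")
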
% healthy_instances) + module.fail_json(msg=f"Waited too long for ELB instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. Target Group thinks {healthy_instances} instances are healthy.") def suspend_processes(ec2_connection, as_group): - suspend_processes = set(module.params.get('suspend_processes')) + suspend_processes = set(module.params.get("suspend_processes")) try: - suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) + suspended_processes = set([p["ProcessName"] for p in as_group["SuspendedProcesses"]]) except AttributeError: # New ASG being created, no suspended_processes defined yet suspended_processes = set() @@ -1099,68 +1122,71 @@ def suspend_processes(ec2_connection, as_group): resume_processes = list(suspended_processes - suspend_processes) if resume_processes: - resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) + resume_asg_processes(ec2_connection, module.params.get("name"), resume_processes) if suspend_processes: - suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) + suspend_asg_processes(ec2_connection, module.params.get("name"), list(suspend_processes)) return True def create_autoscaling_group(connection): - group_name = module.params.get('name') - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - availability_zones = module.params['availability_zones'] - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') - min_size = module.params['min_size'] - max_size = module.params['max_size'] - max_instance_lifetime = module.params.get('max_instance_lifetime') - placement_group = module.params.get('placement_group') - desired_capacity = module.params.get('desired_capacity') - vpc_zone_identifier = module.params.get('vpc_zone_identifier') - set_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - health_check_period = module.params.get('health_check_period') - health_check_type = module.params.get('health_check_type') - default_cooldown = module.params.get('default_cooldown') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') - termination_policies = module.params.get('termination_policies') - notification_topic = module.params.get('notification_topic') - notification_types = module.params.get('notification_types') - metrics_collection = module.params.get('metrics_collection') - metrics_granularity = module.params.get('metrics_granularity') - metrics_list = module.params.get('metrics_list') + group_name = module.params.get("name") + load_balancers = module.params["load_balancers"] + target_group_arns = module.params["target_group_arns"] + availability_zones = module.params["availability_zones"] + launch_template = module.params.get("launch_template") + min_size = module.params["min_size"] + max_size = module.params["max_size"] + max_instance_lifetime = module.params.get("max_instance_lifetime") + placement_group = module.params.get("placement_group") + desired_capacity = module.params.get("desired_capacity") + vpc_zone_identifier = module.params.get("vpc_zone_identifier") + set_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + health_check_period = module.params.get("health_check_period") + health_check_type = 
module.params.get("health_check_type") + default_cooldown = module.params.get("default_cooldown") + wait_for_instances = module.params.get("wait_for_instances") + wait_timeout = module.params.get("wait_timeout") + termination_policies = module.params.get("termination_policies") + notification_topic = module.params.get("notification_topic") + notification_types = module.params.get("notification_types") + metrics_collection = module.params.get("metrics_collection") + metrics_granularity = module.params.get("metrics_granularity") + metrics_list = module.params.get("metrics_list") try: as_groups = describe_autoscaling_groups(connection, group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe auto scaling groups.") - ec2_connection = module.client('ec2') + ec2_connection = module.client("ec2") if vpc_zone_identifier: - vpc_zone_identifier = ','.join(vpc_zone_identifier) + vpc_zone_identifier = ",".join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k, v in tag.items(): - if k != 'propagate_at_launch': - asg_tags.append(dict(Key=k, - Value=to_native(v), - PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), - ResourceType='auto-scaling-group', - ResourceId=group_name)) + if k != "propagate_at_launch": + asg_tags.append( + dict( + Key=k, + Value=to_native(v), + PropagateAtLaunch=bool(tag.get("propagate_at_launch", True)), + ResourceType="auto-scaling-group", + ResourceId=group_name, + ) + ) if not as_groups: if module.check_mode: module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.") if not vpc_zone_identifier and not availability_zones: - availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for - zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] + availability_zones = module.params["availability_zones"] = [ + zone["ZoneName"] for zone in ec2_connection.describe_availability_zones()["AvailabilityZones"] + ] enforce_required_arguments_for_create() @@ -1175,43 +1201,46 @@ def create_autoscaling_group(connection): HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) + TerminationPolicies=termination_policies, + ) if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier + ag["VPCZoneIdentifier"] = vpc_zone_identifier if availability_zones: - ag['AvailabilityZones'] = availability_zones + ag["AvailabilityZones"] = availability_zones if placement_group: - ag['PlacementGroup'] = placement_group + ag["PlacementGroup"] = placement_group if load_balancers: - ag['LoadBalancerNames'] = load_balancers + ag["LoadBalancerNames"] = load_balancers if target_group_arns: - ag['TargetGroupARNs'] = target_group_arns + ag["TargetGroupARNs"] = target_group_arns if max_instance_lifetime: - ag['MaxInstanceLifetime'] = max_instance_lifetime + ag["MaxInstanceLifetime"] = max_instance_lifetime launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] + if "LaunchConfigurationName" in launch_object: + ag["LaunchConfigurationName"] = launch_object["LaunchConfigurationName"] + elif "LaunchTemplate" in launch_object: + if 
"MixedInstancesPolicy" in launch_object: + ag["MixedInstancesPolicy"] = launch_object["MixedInstancesPolicy"] else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] + ag["LaunchTemplate"] = launch_object["LaunchTemplate"] else: - module.fail_json_aws(e, msg="Missing LaunchConfigurationName or LaunchTemplate") + module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate") try: create_asg(connection, **ag) if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) + connection.enable_metrics_collection( + AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list + ) all_ag = describe_autoscaling_groups(connection, group_name) if len(all_ag) == 0: - module.fail_json(msg="No auto scaling group found with the name %s" % group_name) + module.fail_json(msg=f"No auto scaling group found with the name {group_name}") as_group = all_ag[0] suspend_processes(connection, as_group) if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, "viable_instances") if load_balancers: wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined @@ -1237,7 +1266,7 @@ def create_autoscaling_group(connection): changed = True # process tag changes - have_tags = as_group.get('Tags') + have_tags = as_group.get("Tags") want_tags = asg_tags if purge_tags and not want_tags and have_tags: connection.delete_tags(Tags=list(have_tags)) @@ -1248,15 +1277,18 @@ def create_autoscaling_group(connection): if want_tags: want_tags.sort(key=lambda x: x["Key"]) dead_tags = [] - have_tag_keyvals = [x['Key'] for x in have_tags] - want_tag_keyvals = [x['Key'] for x in want_tags] + have_tag_keyvals = [x["Key"] for x in have_tags] + want_tag_keyvals = [x["Key"] for x in want_tags] for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals): changed = True if purge_tags: - dead_tags.append(dict( - ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag)) - have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag] + dead_tags.append( + dict( + ResourceId=as_group["AutoScalingGroupName"], ResourceType="auto-scaling-group", Key=dead_tag + ) + ) + have_tags = [have_tag for have_tag in have_tags if have_tag["Key"] != dead_tag] if dead_tags: connection.delete_tags(Tags=dead_tags) @@ -1268,7 +1300,7 @@ def create_autoscaling_group(connection): # Handle load balancer attachments/detachments # Attach load balancers if they are specified but none currently exist - if load_balancers and not as_group['LoadBalancerNames']: + if load_balancers and not as_group["LoadBalancerNames"]: changed = True try: attach_load_balancers(connection, group_name, load_balancers) @@ -1276,14 +1308,14 @@ def create_autoscaling_group(connection): module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update load balancers if they are specified and one or more already exists - elif as_group['LoadBalancerNames']: + elif as_group["LoadBalancerNames"]: change_load_balancers = load_balancers is not None # Get differences if not load_balancers: load_balancers = list() wanted_elbs = set(load_balancers) - has_elbs = set(as_group['LoadBalancerNames']) + has_elbs = set(as_group["LoadBalancerNames"]) # check if all requested are already existing if has_elbs - wanted_elbs and change_load_balancers: # 
if wanted contains less than existing, then we need to delete some @@ -1293,7 +1325,7 @@ def create_autoscaling_group(connection): try: detach_load_balancers(connection, group_name, list(elbs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach)) + module.fail_json_aws(e, msg=f"Failed to detach load balancers {elbs_to_detach}") if wanted_elbs - has_elbs: # if has contains less than wanted, then we need to add some elbs_to_attach = wanted_elbs.difference(has_elbs) @@ -1302,21 +1334,21 @@ def create_autoscaling_group(connection): try: attach_load_balancers(connection, group_name, list(elbs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach)) + module.fail_json_aws(e, msg=f"Failed to attach load balancers {elbs_to_attach}") # Handle target group attachments/detachments # Attach target groups if they are specified but none currently exist - if target_group_arns and not as_group['TargetGroupARNs']: + if target_group_arns and not as_group["TargetGroupARNs"]: changed = True try: attach_lb_target_groups(connection, group_name, target_group_arns) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update target groups if they are specified and one or more already exists - elif target_group_arns is not None and as_group['TargetGroupARNs']: + elif target_group_arns is not None and as_group["TargetGroupARNs"]: # Get differences wanted_tgs = set(target_group_arns) - has_tgs = set(as_group['TargetGroupARNs']) + has_tgs = set(as_group["TargetGroupARNs"]) tgs_to_detach = has_tgs.difference(wanted_tgs) if tgs_to_detach: @@ -1324,7 +1356,7 @@ def create_autoscaling_group(connection): try: detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) + module.fail_json_aws(e, msg=f"Failed to detach load balancer target groups {tgs_to_detach}") tgs_to_attach = wanted_tgs.difference(has_tgs) if tgs_to_attach: @@ -1332,16 +1364,16 @@ def create_autoscaling_group(connection): try: attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) + module.fail_json(msg=f"Failed to attach load balancer target groups {tgs_to_attach}") # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: - min_size = as_group['MinSize'] + min_size = as_group["MinSize"] if max_size is None: - max_size = as_group['MaxSize'] + max_size = as_group["MaxSize"] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] ag = dict( AutoScalingGroupName=group_name, MinSize=min_size, @@ -1350,37 +1382,43 @@ def create_autoscaling_group(connection): HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) + 
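
[Editor's note, not part of the patch] The load balancer and target group updates above are plain set reconciliation between the desired and current attachments; in isolation:

    # Sketch of the reconciliation above: what to detach and what to attach.
    has_elbs = {"lb1", "lb2"}
    wanted_elbs = {"lb2", "lb3"}
    elbs_to_detach = has_elbs - wanted_elbs   # {"lb1"}  -> detach_load_balancers(...)
    elbs_to_attach = wanted_elbs - has_elbs   # {"lb3"}  -> attach_load_balancers(...)
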
TerminationPolicies=termination_policies, + ) # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not. launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] + if "LaunchConfigurationName" in launch_object: + ag["LaunchConfigurationName"] = launch_object["LaunchConfigurationName"] + elif "LaunchTemplate" in launch_object: + if "MixedInstancesPolicy" in launch_object: + ag["MixedInstancesPolicy"] = launch_object["MixedInstancesPolicy"] else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] + ag["LaunchTemplate"] = launch_object["LaunchTemplate"] else: try: - ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName'] - except Exception: - launch_template = as_group['LaunchTemplate'] + ag["LaunchConfigurationName"] = as_group["LaunchConfigurationName"] + except KeyError: + launch_template = as_group["LaunchTemplate"] # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg. - ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']} + ag["LaunchTemplate"] = { + "LaunchTemplateId": launch_template["LaunchTemplateId"], + "Version": launch_template["Version"], + } if availability_zones: - ag['AvailabilityZones'] = availability_zones + ag["AvailabilityZones"] = availability_zones if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier + ag["VPCZoneIdentifier"] = vpc_zone_identifier if max_instance_lifetime is not None: - ag['MaxInstanceLifetime'] = max_instance_lifetime + ag["MaxInstanceLifetime"] = max_instance_lifetime try: update_asg(connection, **ag) if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) + connection.enable_metrics_collection( + AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list + ) else: connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list) @@ -1393,15 +1431,15 @@ def create_autoscaling_group(connection): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.") if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, "viable_instances") # Wait for ELB health if ELB(s)defined if load_balancers: - module.debug('\tWAITING FOR ELB HEALTH') + module.debug("\tWAITING FOR ELB HEALTH") wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined if target_group_arns: - module.debug('\tWAITING FOR TG HEALTH') + module.debug("\tWAITING FOR TG HEALTH") wait_for_target_group(connection, group_name) try: @@ -1415,10 +1453,10 @@ def create_autoscaling_group(connection): def delete_autoscaling_group(connection): - group_name = module.params.get('name') - notification_topic = module.params.get('notification_topic') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') + group_name = module.params.get("name") + 
notification_topic = module.params.get("notification_topic") + wait_for_instances = module.params.get("wait_for_instances") + wait_timeout = module.params.get("wait_timeout") if notification_topic: del_notification_config(connection, group_name, notification_topic) @@ -1437,20 +1475,20 @@ def delete_autoscaling_group(connection): tmp_groups = describe_autoscaling_groups(connection, group_name) if tmp_groups: tmp_group = tmp_groups[0] - if not tmp_group.get('Instances'): + if not tmp_group.get("Instances"): instances = False time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. {time.asctime()}") delete_asg(connection, group_name, force_delete=False) while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): time.sleep(5) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for ASG to delete. {time.asctime()}") return True return False @@ -1458,53 +1496,53 @@ def delete_autoscaling_group(connection): def get_chunks(l, n): for i in range(0, len(l), n): - yield l[i:i + n] + yield l[i:i + n] # fmt: skip def update_size(connection, group, max_size, min_size, dc): module.debug("setting ASG sizes") - module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) + module.debug(f"minimum size: {min_size}, desired_capacity: {dc}, max size: {max_size}") updated_group = dict() - updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] - updated_group['MinSize'] = min_size - updated_group['MaxSize'] = max_size - updated_group['DesiredCapacity'] = dc + updated_group["AutoScalingGroupName"] = group["AutoScalingGroupName"] + updated_group["MinSize"] = min_size + updated_group["MaxSize"] = max_size + updated_group["DesiredCapacity"] = dc update_asg(connection, **updated_group) def replace(connection): - batch_size = module.params.get('replace_batch_size') - wait_timeout = module.params.get('wait_timeout') - wait_for_instances = module.params.get('wait_for_instances') - group_name = module.params.get('name') - max_size = module.params.get('max_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - launch_config_name = module.params.get('launch_config_name') + batch_size = module.params.get("replace_batch_size") + wait_timeout = module.params.get("wait_timeout") + wait_for_instances = module.params.get("wait_for_instances") + group_name = module.params.get("name") + max_size = module.params.get("max_size") + min_size = module.params.get("min_size") + desired_capacity = module.params.get("desired_capacity") + launch_config_name = module.params.get("launch_config_name") # Required to maintain the default value being set to 'true' if launch_config_name: - lc_check = module.params.get('lc_check') + lc_check = module.params.get("lc_check") else: lc_check = False # Mirror above behavior for Launch Templates - launch_template = module.params.get('launch_template') + launch_template = module.params.get("launch_template") if launch_template: - lt_check = module.params.get('lt_check') + lt_check = module.params.get("lt_check") else: lt_check = False - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') + 
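
[Editor's note, not part of the patch] get_chunks() above feeds the rolling replacement: replace() terminates old instances replace_batch_size at a time, waiting for health between batches. For example (the first two instance IDs are reused from the EXAMPLES, the third is a placeholder):

    def get_chunks(l, n):
        for i in range(0, len(l), n):
            yield l[i:i + n]

    old = ["i-b345231", "i-24c2931", "i-aaa1111"]
    print(list(get_chunks(old, 2)))  # [['i-b345231', 'i-24c2931'], ['i-aaa1111']]
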
replace_instances = module.params.get("replace_instances") + replace_all_instances = module.params.get("replace_all_instances") as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, as_group["MinSize"], "viable_instances") props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] if replace_all_instances: # If replacing all instances, then set replace_instances to current set # This allows replace_instances and replace_all_instances to behave same @@ -1531,7 +1569,7 @@ def replace(connection): # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: - module.debug("Overriding batch size to %s" % num_new_inst_needed) + module.debug(f"Overriding batch size to {num_new_inst_needed}") batch_size = num_new_inst_needed if not old_instances: @@ -1540,9 +1578,9 @@ def replace(connection): # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: - min_size = as_group['MinSize'] + min_size = as_group["MinSize"] if max_size is None: - max_size = as_group['MaxSize'] + max_size = as_group["MaxSize"] # set temporary settings and wait for them to be reached # This should get overwritten if the number of instances left is less than the batch size. @@ -1551,13 +1589,13 @@ def replace(connection): update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, as_group["MinSize"] + batch_size, "viable_instances") wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] if replace_instances: instances = replace_instances @@ -1568,7 +1606,7 @@ def replace(connection): if wait_for_instances: wait_for_term_inst(connection, term_instances) - wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_size, "viable_instances") wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) @@ -1585,13 +1623,13 @@ def replace(connection): def detach(connection): - group_name = module.params.get('name') - detach_instances = module.params.get('detach_instances') + group_name = module.params.get("name") + detach_instances = module.params.get("detach_instances") as_group = describe_autoscaling_groups(connection, group_name)[0] - decrement_desired_capacity = module.params.get('decrement_desired_capacity') - min_size = module.params.get('min_size') + decrement_desired_capacity = module.params.get("decrement_desired_capacity") + min_size = module.params.get("min_size") props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] # check if provided instance exists in asg, create list of instances to detach which exist in asg instances_to_detach = [] @@ -1605,8 +1643,12 @@ def detach(connection): 
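# Worked example for the guard below (hypothetical numbers): detaching 3 of 5
# instances with decrement_desired_capacity=true leaves a desired capacity of 2;
# if min_size is 3 the module must fail rather than shrink the group below it.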
decremented_desired_capacity = len(instances) - len(instances_to_detach) if min_size and min_size > decremented_desired_capacity: module.fail_json( - msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ - which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size)) + msg=( + "Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to" + f" {decremented_desired_capacity} which is below current min_size {min_size}, please update" + " AutoScalingGroup Sizes properly." + ) + ) if instances_to_detach: try: @@ -1623,25 +1665,25 @@ def get_instances_by_launch_config(props, lc_check, initial_instances): old_instances = [] # old instances are those that have the old launch config if lc_check: - for i in props['instances']: + for i in props["instances"]: # Check if migrating from launch_template to launch_config first - if 'launch_template' in props['instance_facts'][i]: + if "launch_template" in props["instance_facts"][i]: old_instances.append(i) - elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']: + elif props["instance_facts"][i].get("launch_config_name") == props["launch_config_name"]: new_instances.append(i) else: old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: + module.debug(f"Comparing initial instances with current: {*initial_instances, }") + for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances, }") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances, }") return new_instances, old_instances @@ -1651,51 +1693,51 @@ def get_instances_by_launch_template(props, lt_check, initial_instances): old_instances = [] # old instances are those that have the old launch template or version of the same launch template if lt_check: - for i in props['instances']: + for i in props["instances"]: # Check if migrating from launch_config_name to launch_template_name first - if 'launch_config_name' in props['instance_facts'][i]: + if "launch_config_name" in props["instance_facts"][i]: old_instances.append(i) - elif props['instance_facts'][i].get('launch_template') == props['launch_template']: + elif props["instance_facts"][i].get("launch_template") == props["launch_template"]: new_instances.append(i) else: old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: + module.debug(f"Comparing initial instances with current: {*initial_instances, }") + for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances, }") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances, }") return new_instances, old_instances def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances): instances_to_terminate = [] - instances = (inst_id 
for inst_id in replace_instances if inst_id in props['instances']) + instances = (inst_id for inst_id in replace_instances if inst_id in props["instances"]) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config - if 'launch_config_name' in module.params: + if "launch_config_name" in module.params: if lc_check: for i in instances: if ( - 'launch_template' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name'] + "launch_template" in props["instance_facts"][i] + or props["instance_facts"][i]["launch_config_name"] != props["launch_config_name"] ): instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) - elif 'launch_template' in module.params: + elif "launch_template" in module.params: if lt_check: for i in instances: if ( - 'launch_config_name' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_template'] != props['launch_template'] + "launch_config_name" in props["instance_facts"][i] + or props["instance_facts"][i]["launch_template"] != props["launch_template"] ): instances_to_terminate.append(i) else: @@ -1707,22 +1749,22 @@ def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initi def terminate_batch(connection, replace_instances, initial_instances, leftovers=False): - batch_size = module.params.get('replace_batch_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - group_name = module.params.get('name') - lc_check = module.params.get('lc_check') - lt_check = module.params.get('lt_check') + batch_size = module.params.get("replace_batch_size") + min_size = module.params.get("min_size") + desired_capacity = module.params.get("desired_capacity") + group_name = module.params.get("name") + lc_check = module.params.get("lc_check") + lt_check = module.params.get("lt_check") decrement_capacity = False break_loop = False as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] props = get_properties(as_group) - desired_size = as_group['MinSize'] - if module.params.get('launch_config_name'): + desired_size = as_group["MinSize"] + if module.params.get("launch_config_name"): new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances) else: new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances) @@ -1732,19 +1774,19 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) - module.debug("new instances needed: %s" % num_new_inst_needed) - module.debug("new instances: %s" % new_instances) - module.debug("old instances: %s" % old_instances) - module.debug("batch instances: %s" % ",".join(instances_to_terminate)) + module.debug(f"new instances needed: {num_new_inst_needed}") + module.debug(f"new instances: {*new_instances, }") + module.debug(f"old instances: {*old_instances, }") + module.debug(f"batch instances: {*instances_to_terminate, }") if num_new_inst_needed == 0: decrement_capacity = True - if as_group['MinSize'] != min_size: + if as_group["MinSize"] != min_size: if min_size is None: - min_size = as_group['MinSize'] - 
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size) + min_size = as_group["MinSize"] + updated_params = dict(AutoScalingGroupName=as_group["AutoScalingGroupName"], MinSize=min_size) update_asg(connection, **updated_params) - module.debug("Updating minimum size back to original of %s" % min_size) + module.debug(f"Updating minimum size back to original of {min_size}") # if are some leftover old instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: @@ -1758,13 +1800,13 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False - module.debug("%s new instances needed" % num_new_inst_needed) + module.debug(f"{num_new_inst_needed} new instances needed") - module.debug("decrementing capacity: %s" % decrement_capacity) + module.debug(f"decrementing capacity: {decrement_capacity}") for instance_id in instances_to_terminate: elb_dreg(connection, group_name, instance_id) - module.debug("terminating instance: %s" % instance_id) + module.debug(f"terminating instance: {instance_id}") terminate_asg_instance(connection, instance_id, decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are @@ -1774,8 +1816,8 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= def wait_for_term_inst(connection, term_instances): - wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('name') + wait_timeout = module.params.get("wait_timeout") + group_name = module.params.get("name") as_group = describe_autoscaling_groups(connection, group_name)[0] count = 1 wait_timeout = time.time() + wait_timeout @@ -1784,134 +1826,131 @@ def wait_for_term_inst(connection, term_instances): count = 0 as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - instance_facts = props['instance_facts'] + instance_facts = props["instance_facts"] instances = (i for i in instance_facts if i in term_instances) for i in instances: - lifecycle = instance_facts[i]['lifecycle_state'] - health = instance_facts[i]['health_status'] - module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) - if lifecycle.startswith('Terminating') or health == 'Unhealthy': + lifecycle = instance_facts[i]["lifecycle_state"] + health = instance_facts[i]["health_status"] + module.debug(f"Instance {i} has state of {lifecycle},{health}") + if lifecycle.startswith("Terminating") or health == "Unhealthy": count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. {time.asctime()}") def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. 
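# Like wait_for_term_inst() above, this helper converts the relative
# wait_timeout (seconds) into an absolute deadline via time.time() + wait_timeout,
# then re-describes the group every 10 seconds until props[prop] reaches
# desired_size or the deadline passes and the module fails the task.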
as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") time.sleep(10) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) - module.debug("Reached %s: %s" % (prop, desired_size)) + module.fail_json(msg=f"Waited too long for new instances to become viable. {time.asctime()}") + module.debug(f"Reached {prop}: {desired_size}") return props def asg_exists(connection): - group_name = module.params.get('name') + group_name = module.params.get("name") as_group = describe_autoscaling_groups(connection, group_name) return bool(len(as_group)) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - load_balancers=dict(type='list', elements='str'), - target_group_arns=dict(type='list', elements='str'), - availability_zones=dict(type='list', elements='str'), - launch_config_name=dict(type='str'), + name=dict(required=True, type="str"), + load_balancers=dict(type="list", elements="str"), + target_group_arns=dict(type="list", elements="str"), + availability_zones=dict(type="list", elements="str"), + launch_config_name=dict(type="str"), launch_template=dict( - type='dict', + type="dict", default=None, options=dict( - version=dict(type='str'), - launch_template_name=dict(type='str'), - launch_template_id=dict(type='str'), - ) + version=dict(type="str"), + launch_template_name=dict(type="str"), + launch_template_id=dict(type="str"), + ), ), - min_size=dict(type='int'), - max_size=dict(type='int'), - max_instance_lifetime=dict(type='int'), + min_size=dict(type="int"), + max_size=dict(type="int"), + max_instance_lifetime=dict(type="int"), mixed_instances_policy=dict( - type='dict', + type="dict", default=None, options=dict( - instance_types=dict( - type='list', - elements='str' - ), + instance_types=dict(type="list", elements="str"), instances_distribution=dict( - type='dict', + type="dict", default=None, options=dict( - on_demand_allocation_strategy=dict(type='str'), - on_demand_base_capacity=dict(type='int'), - on_demand_percentage_above_base_capacity=dict(type='int'), - spot_allocation_strategy=dict(type='str'), - spot_instance_pools=dict(type='int'), - spot_max_price=dict(type='str'), - ) - ) - ) + on_demand_allocation_strategy=dict(type="str"), + on_demand_base_capacity=dict(type="int"), + on_demand_percentage_above_base_capacity=dict(type="int"), + spot_allocation_strategy=dict(type="str"), + spot_instance_pools=dict(type="int"), + spot_max_price=dict(type="str"), + ), + ), + ), ), - placement_group=dict(type='str'), - desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='list', elements='str'), - replace_batch_size=dict(type='int', default=1), - replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[], elements='str'), - detach_instances=dict(type='list', default=[], elements='str'), 
- decrement_desired_capacity=dict(type='bool', default=False), - lc_check=dict(type='bool', default=True), - lt_check=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[], elements='dict'), - purge_tags=dict(type='bool', default=False), - health_check_period=dict(type='int', default=300), - health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - default_cooldown=dict(type='int', default=300), - wait_for_instances=dict(type='bool', default=True), - termination_policies=dict(type='list', default='Default', elements='str'), - notification_topic=dict(type='str', default=None), + placement_group=dict(type="str"), + desired_capacity=dict(type="int"), + vpc_zone_identifier=dict(type="list", elements="str"), + replace_batch_size=dict(type="int", default=1), + replace_all_instances=dict(type="bool", default=False), + replace_instances=dict(type="list", default=[], elements="str"), + detach_instances=dict(type="list", default=[], elements="str"), + decrement_desired_capacity=dict(type="bool", default=False), + lc_check=dict(type="bool", default=True), + lt_check=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="list", default=[], elements="dict"), + purge_tags=dict(type="bool", default=False), + health_check_period=dict(type="int", default=300), + health_check_type=dict(default="EC2", choices=["EC2", "ELB"]), + default_cooldown=dict(type="int", default=300), + wait_for_instances=dict(type="bool", default=True), + termination_policies=dict(type="list", default="Default", elements="str"), + notification_topic=dict(type="str", default=None), notification_types=dict( - type='list', + type="list", default=[ - 'autoscaling:EC2_INSTANCE_LAUNCH', - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', - 'autoscaling:EC2_INSTANCE_TERMINATE', - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + "autoscaling:EC2_INSTANCE_LAUNCH", + "autoscaling:EC2_INSTANCE_LAUNCH_ERROR", + "autoscaling:EC2_INSTANCE_TERMINATE", + "autoscaling:EC2_INSTANCE_TERMINATE_ERROR", ], - elements='str' + elements="str", ), - suspend_processes=dict(type='list', default=[], elements='str'), - metrics_collection=dict(type='bool', default=False), - metrics_granularity=dict(type='str', default='1Minute'), + suspend_processes=dict(type="list", default=[], elements="str"), + metrics_collection=dict(type="bool", default=False), + metrics_granularity=dict(type="str", default="1Minute"), metrics_list=dict( - type='list', + type="list", default=[ - 'GroupMinSize', - 'GroupMaxSize', - 'GroupDesiredCapacity', - 'GroupInServiceInstances', - 'GroupPendingInstances', - 'GroupStandbyInstances', - 'GroupTerminatingInstances', - 'GroupTotalInstances' + "GroupMinSize", + "GroupMaxSize", + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances", ], - elements='str' - ) + elements="str", + ), ) global module @@ -1919,24 +1958,24 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['replace_all_instances', 'replace_instances'], - ['replace_all_instances', 'detach_instances'], - ['launch_config_name', 'launch_template'], - ] + ["replace_all_instances", "replace_instances"], + ["replace_all_instances", "detach_instances"], + ["launch_config_name", "launch_template"], + ], ) - state = 
module.params.get('state') - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - detach_instances = module.params.get('detach_instances') + state = module.params.get("state") + replace_instances = module.params.get("replace_instances") + replace_all_instances = module.params.get("replace_all_instances") + detach_instances = module.params.get("detach_instances") - connection = module.client('autoscaling') + connection = module.client("autoscaling") changed = create_changed = replace_changed = detach_changed = False exists = asg_exists(connection) - if state == 'present': + if state == "present": create_changed, asg_properties = create_autoscaling_group(connection) - elif state == 'absent': + elif state == "absent": changed = delete_autoscaling_group(connection) module.exit_json(changed=changed) @@ -1944,7 +1983,7 @@ def main(): if ( exists and (replace_all_instances or replace_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) + and (module.params.get("launch_config_name") or module.params.get("launch_template")) ): replace_changed, asg_properties = replace(connection) @@ -1952,7 +1991,7 @@ def main(): if ( exists and (detach_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) + and (module.params.get("launch_config_name") or module.params.get("launch_template")) ): detach_changed, asg_properties = detach(connection) @@ -1962,5 +2001,5 @@ def main(): module.exit_json(changed=changed, **asg_properties) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py index c33d0352f..8a39e200b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_group_info version_added: 5.0.0 @@ -33,12 +31,12 @@ options: required: false type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
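# The name parameter is matched as an anchored regular expression (compiled
# internally as '^' + name, see find_asgs below), so a prefix is enough to
# match; the group name here is illustrative:
- name: Find all groups whose name starts with public-webserver
  amazon.aws.autoscaling_group_info:
    name: public-webserver
  register: matching_asgs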
- name: Find all groups @@ -75,9 +73,9 @@ EXAMPLES = ''' name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length > 1 }}" -''' +""" -RETURN = ''' +RETURN = r""" --- auto_scaling_group_arn: description: The Amazon Resource Name of the ASG @@ -238,7 +236,7 @@ termination_policies: returned: success type: str sample: ["Default"] -''' +""" import re @@ -249,14 +247,14 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def match_asg_tags(tags_to_match, asg): for key, value in tags_to_match.items(): - for tag in asg['Tags']: - if key == tag['Key'] and value == tag['Value']: + for tag in asg["Tags"]: + if key == tag["Key"] and value == tag["Value"]: break else: return False @@ -373,16 +371,16 @@ def find_asgs(conn, module, name=None, tags=None): """ try: - asgs_paginator = conn.get_paginator('describe_auto_scaling_groups') + asgs_paginator = conn.get_paginator("describe_auto_scaling_groups") asgs = asgs_paginator.paginate().build_full_result() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups') + module.fail_json_aws(e, msg="Failed to describe AutoScalingGroups") if not asgs: return asgs try: - elbv2 = module.client('elbv2') + elbv2 = module.client("elbv2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # This is nice to have, not essential elbv2 = None @@ -390,11 +388,11 @@ def find_asgs(conn, module, name=None, tags=None): if name is not None: # if the user didn't specify a name - name_prog = re.compile(r'^' + name) + name_prog = re.compile(r"^" + name) - for asg in asgs['AutoScalingGroups']: + for asg in asgs["AutoScalingGroups"]: if name: - matched_name = name_prog.search(asg['AutoScalingGroupName']) + matched_name = name_prog.search(asg["AutoScalingGroupName"]) else: matched_name = True @@ -406,13 +404,13 @@ def find_asgs(conn, module, name=None, tags=None): if matched_name and matched_tags: asg = camel_dict_to_snake_dict(asg) # compatibility with autoscaling_group module - if 'launch_configuration_name' in asg: - asg['launch_config_name'] = asg['launch_configuration_name'] + if "launch_configuration_name" in asg: + asg["launch_config_name"] = asg["launch_configuration_name"] # workaround for https://github.com/ansible/ansible/pull/25015 - if 'target_group_ar_ns' in asg: - asg['target_group_arns'] = asg['target_group_ar_ns'] - del asg['target_group_ar_ns'] - if asg.get('target_group_arns'): + if "target_group_ar_ns" in asg: + asg["target_group_arns"] = asg["target_group_ar_ns"] + del asg["target_group_ar_ns"] + if asg.get("target_group_arns"): if elbv2: try: tg_paginator = elbv2.get_paginator("describe_target_groups") @@ -422,7 +420,7 @@ def find_asgs(conn, module, name=None, tags=None): tg_chunks = [ asg["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() asg["target_group_names"].extend( @@ -436,11 +434,11 @@ def find_asgs(conn, module, name=None, 
tags=None): ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe Target Groups") else: - asg['target_group_names'] = [] + asg["target_group_names"] = [] # get asg lifecycle hooks if any try: - asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name']) - asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks'] + asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg["auto_scaling_group_name"]) + asg["lifecycle_hooks"] = asg_lifecyclehooks["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks") matched_asgs.append(asg) @@ -449,10 +447,9 @@ def find_asgs(conn, module, name=None, tags=None): def main(): - argument_spec = dict( - name=dict(type='str'), - tags=dict(type='dict'), + name=dict(type="str"), + tags=dict(type="dict"), ) module = AnsibleAWSModule( @@ -460,14 +457,14 @@ def main(): supports_check_mode=True, ) - asg_name = module.params.get('name') - asg_tags = module.params.get('tags') + asg_name = module.params.get("name") + asg_tags = module.params.get("tags") - autoscaling = module.client('autoscaling') + autoscaling = module.client("autoscaling") results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py index 246321b56..a373f41bc 100644 --- a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: aws_az_info short_description: Gather information about availability zones in AWS version_added: 1.0.0 @@ -26,12 +24,12 @@ options: default: {} type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
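# Filter keys may use underscores in place of dashes; main() below rewrites
# them (zone_name becomes zone-name) before calling the EC2 API, so the
# following is equivalent to filtering on zone-name:
- name: Gather information about a single availability zone using an underscore filter key
  amazon.aws.aws_az_info:
    filters:
      zone_name: eu-west-1a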
- name: Gather information about all availability zones
@@ -41,9 +39,15 @@ EXAMPLES = '''
   amazon.aws.aws_az_info:
     filters:
       zone-name: eu-west-1a
-'''
-RETURN = '''
+- name: Gather information about availability zones based on their state, such as "available"
+  amazon.aws.aws_az_info:
+    region: us-east-1
+    filters:
+      state: available
+"""
+
+RETURN = r"""
 availability_zones:
   returned: on success
   description: >
@@ -141,46 +145,47 @@ availability_zones:
         "zone_type": "availability-zone"
     }
 ]
-'''
+"""

 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # Handled by AnsibleAWSModule

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list


 def main():
-    argument_spec = dict(
-        filters=dict(default={}, type='dict')
-    )
+    argument_spec = dict(filters=dict(default={}, type="dict"))

     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

-    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())

     # Replace filter key underscores with dashes, for compatibility
-    sanitized_filters = dict(module.params.get('filters'))
-    for k in module.params.get('filters').keys():
+    sanitized_filters = dict(module.params.get("filters"))
+    for k in module.params.get("filters").keys():
         if "_" in k:
-            sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+            sanitized_filters[k.replace("_", "-")] = sanitized_filters[k]
             del sanitized_filters[k]

     try:
-        availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+        availability_zones = connection.describe_availability_zones(
+            aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+        )
     except (BotoCoreError, ClientError) as e:
         module.fail_json_aws(e, msg="Unable to describe availability zones.")

     # Turn the boto3 result into ansible_friendly_snaked_names
-    snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+    snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones["AvailabilityZones"]]

     module.exit_json(availability_zones=snaked_availability_zones)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
index 3c6691606..0ed62fa0c 100644
--- a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import,
division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: aws_caller_info version_added: 1.0.0 @@ -20,20 +18,20 @@ author: - Stijn Dubrul (@sdubrul) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get the current caller identity information amazon.aws.aws_caller_info: register: caller_info -''' +""" -RETURN = ''' +RETURN = r""" account: description: The account id the access credentials are associated with. returned: success @@ -56,17 +54,18 @@ user_id: returned: success type: str sample: 123456789012:my-federated-user-name -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry def main(): @@ -75,34 +74,32 @@ def main(): supports_check_mode=True, ) - client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("sts", retry_decorator=AWSRetry.jittered_backoff()) try: caller_info = client.get_caller_identity(aws_retry=True) - caller_info.pop('ResponseMetadata', None) + caller_info.pop("ResponseMetadata", None) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve caller identity') + module.fail_json_aws(e, msg="Failed to retrieve caller identity") - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + iam_client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: # Although a list is returned by list_account_aliases AWS supports maximum one alias per account. # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias. # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output response = iam_client.list_account_aliases(aws_retry=True) - if response and response['AccountAliases']: - caller_info['account_alias'] = response['AccountAliases'][0] + if response and response["AccountAliases"]: + caller_info["account_alias"] = response["AccountAliases"][0] else: - caller_info['account_alias'] = '' + caller_info["account_alias"] = "" except (BotoCoreError, ClientError): # The iam:ListAccountAliases permission is required for this operation to succeed. # Lacking this permission is handled gracefully by not returning the account_alias. 
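        # Either way, exit_json() below returns the snake_cased identity; with an
        # alias present the output looks roughly like (samples taken from the
        # RETURN block above, the alias value is a placeholder):
        # {"account": "123456789012",
        #  "user_id": "123456789012:my-federated-user-name",
        #  "account_alias": "example-alias"}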
pass - module.exit_json( - changed=False, - **camel_dict_to_snake_dict(caller_info)) + module.exit_json(changed=False, **camel_dict_to_snake_dict(caller_info)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py new file mode 100644 index 000000000..ccec48bd9 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py @@ -0,0 +1,98 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +module: aws_region_info +short_description: Gather information about AWS regions +version_added: 1.0.0 +version_added_collection: community.aws +description: + - Gather information about AWS regions. +author: + - 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. + default: {} + type: dict +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all regions +- amazon.aws.aws_region_info: + +# Gather information about a single region +- amazon.aws.aws_region_info: + filters: + region-name: eu-west-1 +""" + +RETURN = r""" +regions: + returned: on success + description: > + Regions that match the provided filters. Each element consists of a dict with all the information related + to that region. 
+ type: list + sample: "[{ + 'endpoint': 'ec2.us-west-1.amazonaws.com', + 'region_name': 'us-west-1' + }]" +""" + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + + +def main(): + argument_spec = dict( + filters=dict(default={}, type="dict"), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict(module.params.get("filters")) + for k in module.params.get("filters").keys(): + if "_" in k: + sanitized_filters[k.replace("_", "-")] = sanitized_filters[k] + del sanitized_filters[k] + + try: + regions = connection.describe_regions( + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe regions.") + + module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions["Regions"]]) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_plan.py b/ansible_collections/amazon/aws/plugins/modules/backup_plan.py new file mode 100644 index 000000000..4fab240c7 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_plan.py @@ -0,0 +1,700 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan +version_added: 6.0.0 +short_description: Manage AWS Backup Plans +description: + - Creates, updates, or deletes AWS Backup Plans + - For more information see the AWS documentation for Backup plans U(https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html). +author: + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) + - Helen Bailey (@hakbailey) +options: + state: + description: + - Create/update or delete a backup plan. + type: str + default: present + choices: ['present', 'absent'] + backup_plan_name: + description: + - The display name of a backup plan. Must contain 1 to 50 alphanumeric or '-_.' characters. + type: str + required: true + aliases: ['name'] + rules: + description: + - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources. + - Required when I(state=present). + type: list + elements: dict + suboptions: + rule_name: + description: Name of the rule. + type: str + required: true + target_backup_vault_name: + description: Name of the Backup Vault this rule should target. + type: str + required: true + schedule_expression: + description: A CRON expression in UTC specifying when Backup initiates a backup + job. AWS default is used if not supplied. + type: str + default: 'cron(0 5 ? 
* * *)' + start_window_minutes: + description: + - A value in minutes after a backup is scheduled before a job will be + canceled if it doesn't start successfully. If this value is included, it + must be at least 60 minutes to avoid errors. + - AWS default if not supplied is 480. + type: int + default: 480 + completion_window_minutes: + description: + - A value in minutes after a backup job is successfully started before it + must be completed or it will be canceled by Backup. + - AWS default if not supplied is 10080 + type: int + default: 10080 + lifecycle: + description: + - The lifecycle defines when a protected resource is transitioned to cold + storage and when it expires. Backup will transition and expire backups + automatically according to the lifecycle that you define. + - Backups transitioned to cold storage must be stored in cold storage for a + minimum of 90 days. Therefore, the "retention" setting must be 90 days + greater than the "transition to cold after days" setting. The "transition + to cold after days" setting cannot be changed after a backup has been + transitioned to cold. + type: dict + suboptions: + move_to_cold_storage_after_days: + description: Specifies the number of days after creation that a recovery point is moved to cold storage. + type: int + delete_after_days: + description: Specifies the number of days after creation that a recovery + point is deleted. Must be greater than 90 days plus + move_to_cold_storage_after_days. + type: int + recovery_point_tags: + description: To help organize your resources, you can assign your own metadata to the resources that you create. + type: dict + copy_actions: + description: An array of copy_action objects, which contains the details of the copy operation. + type: list + elements: dict + suboptions: + destination_backup_vault_arn: + description: An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. + type: str + required: true + lifecycle: + description: + - Contains an array of Transition objects specifying how long in days + before a recovery point transitions to cold storage or is deleted. + - Backups transitioned to cold storage must be stored in cold storage for + a minimum of 90 days. Therefore, on the console, the "retention" + setting must be 90 days greater than the "transition to cold after + days" setting. The "transition to cold after days" setting cannot be + changed after a backup has been transitioned to cold. + type: dict + suboptions: + move_to_cold_storage_after_days: + description: Specifies the number of days after creation that a + recovery point is moved to cold storage. + type: int + delete_after_days: + description: Specifies the number of days after creation that a + recovery point is deleted. Must be greater than 90 days plus + move_to_cold_storage_after_days. + type: int + enable_continuous_backup: + description: + - Specifies whether Backup creates continuous backups. True causes Backup to + create continuous backups capable of point-in-time restore (PITR). False + (or not specified) causes Backup to create snapshot backups. + - AWS default if not supplied is false. + type: bool + default: false + schedule_expression_timezone: + description: + - This is the timezone in which the schedule expression is set. + - By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone. + - This option requires botocore >= 1.31.36. 
+        type: str
+        default: "Etc/UTC"
+        required: false
+        version_added: 7.3.0
+  advanced_backup_settings:
+    description:
+      - Specifies a list of advanced backup settings for each resource type.
+      - These settings are only available for Windows Volume Shadow Copy Service (VSS) backup jobs.
+    required: false
+    type: list
+    elements: dict
+    suboptions:
+      resource_type:
+        description:
+          - Specifies an object containing resource type and backup options.
+          - The only supported resource type is Amazon EC2 instances with Windows Volume Shadow Copy Service (VSS).
+        type: str
+        choices: ['EC2']
+      backup_options:
+        description:
+          - Specifies the backup option for a selected resource.
+          - This option is only available for Windows VSS backup jobs.
+        type: dict
+        choices: [{'WindowsVSS': 'enabled'}, {'WindowsVSS': 'disabled'}]
+  creator_request_id:
+    description: Identifies the request and allows failed requests to be retried
+      without the risk of running the operation twice. If the request includes a
+      CreatorRequestId that matches an existing backup plan, that plan is returned.
+    type: str
+  tags:
+    description: To help organize your resources, you can assign your own metadata to
+      the resources that you create. Each tag is a key-value pair. The specified tags
+      are assigned to all backups created with this plan.
+    type: dict
+    aliases: ['resource_tags', 'backup_plan_tags']
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+  - amazon.aws.tags
+"""
+
+EXAMPLES = r"""
+- name: Create an AWS Backup plan
+  amazon.aws.backup_plan:
+    state: present
+    backup_plan_name: elastic
+    rules:
+      - rule_name: daily
+        target_backup_vault_name: "{{ backup_vault_name }}"
+        schedule_expression: 'cron(0 5 ? * * *)'
+        start_window_minutes: 60
+        completion_window_minutes: 1440
+- name: Delete an AWS Backup plan
+  amazon.aws.backup_plan:
+    backup_plan_name: elastic
+    state: absent
+"""
+
+RETURN = r"""
+exists:
+  description: Whether the resource exists.
+  returned: always
+  type: bool
+  sample: true
+backup_plan_arn:
+  description: ARN of the backup plan.
+  returned: always
+  type: str
+  sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b
+backup_plan_id:
+  description: ID of the backup plan.
+  returned: always
+  type: str
+  sample: 1111f877-1ecf-4d79-9718-a861cd09df3b
+backup_plan_name:
+  description: Name of the backup plan.
+  returned: always
+  type: str
+  sample: elastic
+creation_date:
+  description: Creation date of the backup plan.
+  returned: on create/update
+  type: str
+  sample: '2023-01-24T10:08:03.193000+01:00'
+deletion_date:
+  description: Date the backup plan was deleted.
+  returned: on delete
+  type: str
+  sample: '2023-05-05T16:24:51.987000-04:00'
+version_id:
+  description: Version ID of the backup plan.
+  returned: always
+  type: str
+  sample: ODM3MjVjNjItYWFkOC00NjExLWIwZTYtZDNiNGI5M2I0ZTY1
+backup_plan:
+  description: Backup plan details.
+  returned: on create/update
+  type: dict
+  contains:
+    backup_plan_name:
+      description: Name of the backup plan.
+      returned: always
+      type: str
+      sample: elastic
+    rules:
+      description:
+        - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.
+      returned: always
+      type: list
+      elements: dict
+      contains:
+        rule_name:
+          description: A display name for a backup rule.
+ returned: always + type: str + sample: "daily" + target_backup_vault_name: + description: The name of a logical container where backups are stored. + returned: always + type: str + sample: 09da67966fd5-backup-vault" + schedule_expression: + description: A cron expression in UTC specifying when Backup initiates a backup job. + returned: always + type: str + sample: "cron(0 5 ? * * *)" + start_window_minutes: + description: + - A value in minutes after a backup is scheduled before a job will be canceled if it + doesn't start successfully. + type: int + sample: 480 + completion_window_minutes: + description: + - A value in minutes after a backup job is successfully started before it must be + completed or it will be canceled by Backup. + type: int + sample: 10080 + lifecycle: + description: + - The lifecycle defines when a protected resource is transitioned to cold storage and when + it expires. + type: dict + sample: {} + recovery_point_tags: + description: + - An array of key-value pair strings that are assigned to resources that are associated with + this rule when restored from backup. + type: dict + sample: {} + rule_id: + description: + - Uniquely identifies a rule that is used to schedule the backup of a selection of resources. + type: str + returned: always + sample: "973621ef-d863-41ef-b5c3-9e943a64ad0c" + copy_actions: + description: An array of CopyAction objects, which contains the details of the copy operation. + type: list + returned: always + sample: [] + enable_continous_backup: + description: Specifies whether Backup creates continuous backups. + type: bool + returned: always + sample: false + schedule_expression_timezone: + description: + - This is the timezone in which the schedule expression is set. + - This information is returned for botocore versions >= 1.31.36. + type: str + returned: when botocore >= 1.31.36 + sample: "Etc/UTC" + version_added: 7.3.0 + advanced_backup_settings: + description: Advanced backup settings of the backup plan. + returned: when configured + type: list + elements: dict + contains: + resource_type: + description: Resource type of the advanced settings. + type: str + backup_options: + description: Backup options of the advanced settings. + type: dict + tags: + description: Tags of the backup plan. + returned: on create/update + type: str +""" + +import json +from datetime import datetime +from typing import Optional + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +ARGUMENT_SPEC = dict( + state=dict(type="str", choices=["present", "absent"], default="present"), + backup_plan_name=dict(required=True, type="str", aliases=["name"]), + rules=dict( + type="list", + elements="dict", + options=dict( + rule_name=dict(required=True, type="str"), + target_backup_vault_name=dict(required=True, type="str"), + schedule_expression=dict(type="str", default="cron(0 5 ? 
* * *)"), + start_window_minutes=dict(type="int", default=480), + completion_window_minutes=dict(type="int", default=10080), + schedule_expression_timezone=dict(type="str", default="Etc/UTC"), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + recovery_point_tags=dict(type="dict"), + copy_actions=dict( + type="list", + elements="dict", + options=dict( + destination_backup_vault_arn=dict(required=True, type="str"), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + ), + ), + enable_continuous_backup=dict(type="bool", default=False), + ), + ), + advanced_backup_settings=dict( + type="list", + elements="dict", + options=dict( + resource_type=dict(type="str", choices=["EC2"]), + backup_options=dict( + type="dict", + choices=[{"WindowsVSS": "enabled"}, {"WindowsVSS": "disabled"}], + ), + ), + ), + creator_request_id=dict(type="str"), + tags=dict(type="dict", aliases=["backup_plan_tags", "resource_tags"]), + purge_tags=dict(default=True, type="bool"), +) + +REQUIRED_IF = [ + ("state", "present", ["backup_plan_name", "rules"]), + ("state", "absent", ["backup_plan_name"]), +] + +SUPPORTS_CHECK_MODE = True + + +def format_client_params( + module: AnsibleAWSModule, + plan: dict, + tags: Optional[dict] = None, + backup_plan_id: Optional[str] = None, + operation: Optional[str] = None, +) -> dict: + """ + Formats plan details to match boto3 backup client param expectations. + + module : AnsibleAWSModule object + plan: Dict of plan details including name, rules, and advanced settings + tags: Dict of plan tags + backup_plan_id: ID of backup plan to update, only needed for update operation + operation: Operation to add specific params for, either create or update + """ + params = { + "BackupPlan": snake_dict_to_camel_dict( + {k: v for k, v in plan.items() if v != "backup_plan_name"}, + capitalize_first=True, + ) + } + + if operation == "create": # Add create-specific params + if tags: + params["BackupPlanTags"] = tags + creator_request_id = module.params["creator_request_id"] + if creator_request_id: + params["CreatorRequestId"] = creator_request_id + + elif operation == "update": # Add update-specific params + params["BackupPlanId"] = backup_plan_id + + return params + + +def format_check_mode_response(plan_name: str, plan: dict, tags: dict, delete: bool = False) -> dict: + """ + Formats plan details in check mode to match result expectations. + + plan_name: Name of backup plan + plan: Dict of plan details including name, rules, and advanced settings + tags: Optional dict of plan tags + delete: Whether the response is for a delete action + """ + timestamp = datetime.now().isoformat() + if delete: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "deletion_date": timestamp, + "version_id": "", + } + else: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "creation_date": timestamp, + "version_id": "", + "backup_plan": { + "backup_plan_name": plan_name, + "rules": plan["rules"], + "advanced_backup_settings": plan["advanced_backup_settings"], + "tags": tags, + }, + } + + +def create_backup_plan(module: AnsibleAWSModule, client, create_params: dict) -> dict: + """ + Creates a backup plan. 
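+    On success the raw client response is returned; per the public
+    CreateBackupPlan API this typically includes BackupPlanId, BackupPlanArn,
+    CreationDate and VersionId (an assumption from the API docs, not a
+    guarantee for every botocore version).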
+
+    module : AnsibleAWSModule object
+    client : boto3 backup client connection object
+    create_params : The boto3 backup client parameters to create a backup plan
+    """
+    try:
+        response = client.create_backup_plan(**create_params)
+    except (
+        BotoCoreError,
+        ClientError,
+    ) as err:
+        module.fail_json_aws(err, msg=f"Failed to create backup plan {err}")
+    return response
+
+
+def plan_update_needed(existing_plan: dict, new_plan: dict) -> bool:
+    """
+    Determines whether existing and new plan rules/settings match.
+
+    existing_plan: Dict of existing plan details including rules and advanced settings,
+        in snake-case format
+    new_plan: Dict of new plan details including rules and advanced settings, in
+        snake-case format
+    """
+    update_needed = False
+
+    # Check whether rules match
+    existing_rules = json.dumps(
+        [{key: val for key, val in rule.items() if key != "rule_id"} for rule in existing_plan["backup_plan"]["rules"]],
+        sort_keys=True,
+    )
+    new_rules = json.dumps(new_plan["rules"], sort_keys=True)
+    if not existing_rules or existing_rules != new_rules:
+        update_needed = True
+
+    # Check whether advanced backup settings match
+    existing_advanced_backup_settings = json.dumps(
+        existing_plan["backup_plan"].get("advanced_backup_settings", []),
+        sort_keys=True,
+    )
+    new_advanced_backup_settings = json.dumps(new_plan.get("advanced_backup_settings", []), sort_keys=True)
+    if existing_advanced_backup_settings != new_advanced_backup_settings:
+        update_needed = True
+
+    return update_needed
+
+
+def update_backup_plan(module: AnsibleAWSModule, client, update_params: dict) -> dict:
+    """
+    Updates a backup plan.
+
+    module : AnsibleAWSModule object
+    client : boto3 backup client connection object
+    update_params : The boto3 backup client parameters to update a backup plan
+    """
+    try:
+        response = client.update_backup_plan(**update_params)
+    except (
+        BotoCoreError,
+        ClientError,
+    ) as err:
+        module.fail_json_aws(err, msg=f"Failed to update backup plan {err}")
+    return response
+
+
+def tag_backup_plan(
+    module: AnsibleAWSModule,
+    client,
+    new_tags: Optional[dict],
+    plan_arn: str,
+    current_tags: Optional[dict] = None,
+):
+    """
+    Creates, updates, and/or removes tags on a Backup Plan resource.
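+    Returns True when a tag change is applied (or would be, in check mode) and
+    False when there is nothing to change.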
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    new_tags : Dict of new tags to apply to the plan
+    plan_arn : The ARN of the Backup Plan to operate on
+    current_tags : Dict of the current tags on the resource, if any
+    """
+
+    if not new_tags and not current_tags:
+        return False
+
+    if module.check_mode:
+        return True
+
+    new_tags = new_tags or {}
+    current_tags = current_tags or {}
+    tags_to_add, tags_to_remove = compare_aws_tags(current_tags, new_tags, purge_tags=module.params["purge_tags"])
+
+    if not tags_to_add and not tags_to_remove:
+        return False
+
+    if tags_to_remove:
+        try:
+            client.untag_resource(ResourceArn=plan_arn, TagKeyList=tags_to_remove)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to remove tags from the plan")
+
+    if tags_to_add:
+        try:
+            client.tag_resource(ResourceArn=plan_arn, Tags=tags_to_add)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to add tags to the plan")
+
+    return True
+
+
+def delete_backup_plan(module: AnsibleAWSModule, client, backup_plan_id: str) -> dict:
+    """
+    Deletes a Backup Plan.
+
+    module : AnsibleAWSModule object
+    client : boto3 backup client connection object
+    backup_plan_id : ID (*not* name or ARN) of the Backup plan to delete
+    """
+    try:
+        response = client.delete_backup_plan(BackupPlanId=backup_plan_id)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to delete the Backup Plan")
+    return response
+
+
+def main():
+    module = AnsibleAWSModule(
+        argument_spec=ARGUMENT_SPEC,
+        required_if=REQUIRED_IF,
+        supports_check_mode=SUPPORTS_CHECK_MODE,
+    )
+
+    # Set initial result values
+    result = dict(changed=False, exists=False)
+
+    # Get supplied params from module
+    client = module.client("backup")
+    state = module.params["state"]
+    plan_name = module.params["backup_plan_name"]
+
+    plan = {
+        "backup_plan_name": module.params["backup_plan_name"],
+        "rules": [scrub_none_parameters(rule) for rule in module.params["rules"] or []],
+        "advanced_backup_settings": [
+            scrub_none_parameters(setting) for setting in module.params["advanced_backup_settings"] or []
+        ],
+    }
+
+    if module.params["rules"]:
+        for each in plan["rules"]:
+            if not module.botocore_at_least("1.31.36"):
+                module.warn(
+                    "schedule_expression_timezone requires botocore >= 1.31.36. schedule_expression_timezone will be ignored."
+ ) + each.pop("schedule_expression_timezone") + + tags = module.params["tags"] + + # Get existing backup plan details and ID if present + existing_plan = get_plan_details(module, client, plan_name) + if existing_plan: + existing_plan_id = existing_plan[0]["backup_plan_id"] + existing_plan = existing_plan[0] + else: + existing_plan = existing_plan_id = None + + if state == "present": # Create or update plan + if existing_plan_id is None: # Plan does not exist, create it + if module.check_mode: # Use supplied params as result data in check mode + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + client_params = format_client_params(module, plan, tags=tags, operation="create") + response = create_backup_plan(module, client, client_params) + backup_plan = get_plan_details(module, client, plan_name)[0] + result["exists"] = True + result["changed"] = True + result.update(backup_plan) + + else: # Plan exists, update as needed + result["exists"] = True + if plan_update_needed(existing_plan, plan): + if not module.check_mode: + client_params = format_client_params( + module, + plan, + backup_plan_id=existing_plan_id, + operation="update", + ) + update_backup_plan(module, client, client_params) + result["changed"] = True + if tag_backup_plan( + module, + client, + tags, + existing_plan["backup_plan_arn"], + existing_plan["tags"], + ): + result["changed"] = True + if module.check_mode: + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + backup_plan = get_plan_details(module, client, plan_name)[0] + result.update(backup_plan) + + elif state == "absent": # Delete plan + if existing_plan_id is None: # Plan does not exist, can't delete it + module.debug(msg=f"Backup plan {plan_name} not found.") + else: # Plan exists, delete it + if module.check_mode: + response = format_check_mode_response(plan_name, existing_plan, tags, True) + else: + response = delete_backup_plan(module, client, existing_plan_id) + result["changed"] = True + result["exists"] = False + result.update(camel_dict_to_snake_dict(response)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py new file mode 100644 index 000000000..096857d5b --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan_info +version_added: 6.0.0 +short_description: Describe AWS Backup Plans +description: + - Lists info about Backup Plan configuration. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +options: + backup_plan_names: + type: list + elements: str + required: true + description: + - Specifies a list of plan names. +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
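+
+# The tasks below are an illustrative sketch rather than one of the module's
+# official examples; the plan name and the registered variable are
+# placeholders. They show how the documented backup_plans return value can
+# be registered and inspected in a follow-up task.
+- name: Look up a plan and register the result
+  amazon.aws.backup_plan_info:
+    backup_plan_names:
+      - my-nightly-plan
+  register: plan_info
+
+- name: Show the returned backup plan objects
+  ansible.builtin.debug:
+    msg: "{{ plan_info.backup_plans }}"
+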
+# Gather information about a particular backup plan
+- amazon.aws.backup_plan_info:
+    backup_plan_names:
+      - elastic
+"""
+
+RETURN = r"""
+backup_plans:
+    description: List of backup plan objects. Each element consists of a dict with all the information related to that backup plan.
+    type: list
+    elements: dict
+    returned: always
+    contains:
+        backup_plan_arn:
+            description: ARN of the backup plan.
+            type: str
+            sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b
+        backup_plan_id:
+            description: Id of the backup plan.
+            type: str
+            sample: 1111f877-1ecf-4d79-9718-a861cd09df3b
+        backup_plan_name:
+            description: Name of the backup plan.
+            type: str
+            sample: elastic
+        creation_date:
+            description: Creation date of the backup plan.
+            type: str
+            sample: '2023-01-24T10:08:03.193000+01:00'
+        last_execution_date:
+            description: Last execution date of the backup plan.
+            type: str
+            sample: '2023-03-24T06:30:08.250000+01:00'
+        tags:
+            description: Tags of the backup plan.
+            type: str
+        version_id:
+            description: Version id of the backup plan.
+            type: str
+        backup_plan:
+            returned: always
+            description: Detailed information about the backup plan.
+            type: list
+            elements: dict
+            contains:
+                backup_plan_name:
+                    description: Name of the backup plan.
+                    type: str
+                    sample: elastic
+                advanced_backup_settings:
+                    description: Advanced backup settings of the backup plan.
+                    type: list
+                    elements: dict
+                    contains:
+                        resource_type:
+                            description: Resource type of the advanced setting.
+                            type: str
+                        backup_options:
+                            description: Options of the advanced setting.
+                            type: dict
+                rules:
+                    description:
+                        - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.
+ type: list +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def get_backup_plan_detail(client, module): + backup_plan_list = [] + backup_plan_names = module.params.get("backup_plan_names") + + for name in backup_plan_names: + backup_plan_list.extend(get_plan_details(module, client, name)) + + module.exit_json(**{"backup_plans": backup_plan_list}) + + +def main(): + argument_spec = dict( + backup_plan_names=dict(type="list", elements="str", required=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + connection = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + get_backup_plan_detail(connection, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py new file mode 100644 index 000000000..c6ed71e7a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: backup_restore_job_info +version_added: 6.0.0 +short_description: List information about backup restore jobs +description: + - List detailed information about AWS Backup restore jobs initiated to restore a saved resource. +author: + - Mandar Vijay Kulkarni (@mandar242) +options: + account_id: + description: + - The account ID to list the restore jobs from. + required: false + type: str + status: + description: + - Status of restore jobs to filter the result based on job status. + required: false + choices: ['PENDING', 'RUNNING', 'COMPLETED', 'ABORTED', 'FAILED'] + type: str + created_before: + description: + - Specified date to filter result based on the restore job creation datetime. + - If specified, only the restore jobs created before the specified datetime will be returned. + - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z". + required: false + type: str + created_after: + description: + - Specified date to filter result based on the restore job creation datetime. + - If specified, only the restore jobs created after the specified datetime will be returned. + - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z". + required: false + type: str + completed_before: + description: + - Specified date to filter result based on the restore job completion datetime. + - If specified, only the restore jobs created before the specified datetime will be returned. + - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z". + required: false + type: str + completed_after: + description: + - Specified date to filter result based on the restore job completion datetime. 
+      - If specified, only the restore jobs created after the specified datetime will be returned.
+      - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z".
+    required: false
+    type: str
+  restore_job_id:
+    description:
+      - ID of the restore job to get information about.
+      - This parameter is mutually exclusive with all other parameters.
+    required: false
+    type: str
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: List all restore jobs
+  amazon.aws.backup_restore_job_info:
+
+- name: List specific restore job's info by job ID
+  amazon.aws.backup_restore_job_info:
+    restore_job_id: "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD"
+
+- name: List restore jobs based on Account ID
+  amazon.aws.backup_restore_job_info:
+    account_id: xx1234567890
+
+- name: List restore jobs based on status and created_before time
+  amazon.aws.backup_restore_job_info:
+    status: COMPLETED
+    created_before: "2023-02-25T00:05:36.309Z"
+"""
+
+RETURN = r"""
+restore_jobs:
+  returned: always
+  description:
+    - Restore jobs that match the provided filters.
+    - Each element consists of a dict with details related to that restore job.
+  type: list
+  elements: dict
+  contains:
+    account_id:
+      description:
+        - The account ID that owns the restore job.
+      type: str
+      returned: if restore job exists
+      sample: "123456789012"
+    created_resource_arn:
+      description:
+        - An Amazon Resource Name (ARN) that uniquely identifies a resource whose recovery point is being restored.
+        - The format of the ARN depends on the resource type of the backed-up resource.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    creation_date:
+      description:
+        - The date and time that a restore job is created, in Unix format and Coordinated Universal Time (UTC).
+      type: str
+      returned: if restore job exists
+      sample: "2023-03-13T15:53:07.172000-07:00"
+    iam_role_arn:
+      description:
+        - The IAM role ARN used to create the target recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    percent_done:
+      description:
+        - The estimated percentage of the job that was complete at the time the job status was queried.
+      type: str
+      returned: if restore job exists
+      sample: "0.00%"
+    recovery_point_arn:
+      description:
+        - An ARN that uniquely identifies a recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    restore_job_id:
+      description:
+        - The ID of the job that restores a recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "AAAA1234-1D1D-1234-3F8E-1EB111EEEE00"
+    status:
+      description:
+        - The state of the job initiated by Backup to restore a recovery point.
+ type: str + returned: if restore job exists + sample: "COMPLETED" +""" + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def build_request_args(account_id, status, created_before, created_after, completed_before, completed_after): + request_args = { + "ByAccountId": account_id if account_id else "", + "ByStatus": status if status else "", + "ByCreatedBefore": created_before if created_before else "", + "ByCreatedAfter": created_after if created_after else "", + "ByCompleteBefore": completed_before if completed_before else "", + "ByCompleteAfter": completed_after if completed_after else "", + } + + request_args = {k: v for k, v in request_args.items() if v} + + return request_args + + +def _describe_restore_job(connection, module, restore_job_id): + try: + response = connection.describe_restore_job(RestoreJobId=restore_job_id) + response.pop("ResponseMetadata", None) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"Failed to describe restore job with ID: {restore_job_id}") + + return [camel_dict_to_snake_dict(response)] + + +@AWSRetry.jittered_backoff() +def _list_restore_jobs(connection, **params): + paginator = connection.get_paginator("list_restore_jobs") + return paginator.paginate(**params).build_full_result() + + +def list_restore_jobs(connection, module, request_args): + try: + response = _list_restore_jobs(connection, **request_args) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list restore jobs") + + return [camel_dict_to_snake_dict(restore_job) for restore_job in response["RestoreJobs"]] + + +def main(): + argument_spec = dict( + account_id=dict(required=False, type="str"), + status=dict(required=False, type="str", choices=["PENDING", "RUNNING", "COMPLETED", "ABORTED", "FAILED"]), + created_before=dict(required=False, type="str"), + created_after=dict(required=False, type="str"), + completed_before=dict(required=False, type="str"), + completed_after=dict(required=False, type="str"), + restore_job_id=dict(required=False, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + backup_client = module.client("backup") + + request_args = build_request_args( + account_id=module.params["account_id"], + status=module.params["status"], + created_before=module.params["created_before"], + created_after=module.params["created_after"], + completed_before=module.params["completed_before"], + completed_after=module.params["completed_after"], + ) + + if module.params.get("restore_job_id"): + restore_jobs = _describe_restore_job(backup_client, module, module.params.get("restore_job_id")) + else: + restore_jobs = list_restore_jobs(backup_client, module, request_args) + + module.exit_json(changed=False, restore_jobs=restore_jobs) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_selection.py b/ansible_collections/amazon/aws/plugins/modules/backup_selection.py new file mode 100644 index 000000000..ff78d0b68 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_selection.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +module: backup_selection +short_description: Create, delete and modify AWS Backup selection +version_added: 6.0.0 +description: + - Manages AWS Backup selections. + - For more information see the AWS documentation for backup selections + U(https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html). +options: + backup_plan_name: + description: + - Uniquely identifies the backup plan to be associated with the selection of resources. + required: true + type: str + aliases: + - plan_name + backup_selection_name: + description: + - The display name of a resource selection document. Must contain 1 to 50 alphanumeric or '-_.' characters. + required: true + type: str + aliases: + - selection_name + iam_role_arn: + description: + - The ARN of the IAM role that Backup uses to authenticate when backing up the target resource. + type: str + resources: + description: + - A list of Amazon Resource Names (ARNs) to assign to a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to assign many resources to a backup plan, consider a different resource selection + strategy, such as assigning all resources of a resource type or refining your resource selection using tags. + type: list + elements: str + list_of_tags: + description: + - A list of conditions that you define to assign resources to your backup plans using tags. + - Condition operators are case sensitive. + - When you specify more than one condition in I(list_of_tags), you assign all resources that match AT LEAST ONE condition (using OR logic). + type: list + elements: dict + suboptions: + condition_type: + description: + - An operation applied to a key-value pair used to assign resources to your backup plan. + - Condition only supports C(STRINGEQUALS). + type: str + condition_key: + description: + - The key in a key-value pair. + type: str + condition_value: + description: + - The value in a key-value pair. + type: str + not_resources: + description: + - A list of Amazon Resource Names (ARNs) to exclude from a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to exclude many resources from a backup plan, consider a different resource + selection strategy, such as assigning only one or a few resource types or refining your resource selection using tags. + type: list + elements: str + conditions: + description: + - A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags. + - When you specify more than one condition in I(conditions), you only assign the resources that match ALL conditions (using AND logic). + - I(conditions) supports C(string_equals), C(string_like), C(string_not_equals), and C(string_not_like). I(list_of_tags) only supports C(string_equals). + type: dict + suboptions: + string_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged with the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. 
+          type: str
+    string_like:
+      description:
+        - Filters the values of your tagged resources for matching tag values with the use of a wildcard character (*) anywhere in the string.
+          For example, "prod*" or "*rod*" matches the tag value "production".
+      type: list
+      default: []
+      elements: dict
+      suboptions:
+        condition_key:
+          description:
+            - The key in a key-value pair.
+            - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
+          type: str
+        condition_value:
+          description: The value in a key-value pair.
+          type: str
+    string_not_equals:
+      description:
+        - Filters the values of your tagged resources for only those resources that you tagged with a different value.
+      type: list
+      default: []
+      elements: dict
+      suboptions:
+        condition_key:
+          description:
+            - The key in a key-value pair.
+            - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
+          type: str
+        condition_value:
+          description: The value in a key-value pair.
+          type: str
+    string_not_like:
+      description:
+        - Filters the values of your tagged resources for non-matching tag values with the use of a wildcard character (*) anywhere in the string.
+      type: list
+      default: []
+      elements: dict
+      suboptions:
+        condition_key:
+          description:
+            - The key in a key-value pair.
+            - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
+          type: str
+        condition_value:
+          description: The value in a key-value pair.
+          type: str
+  state:
+    description:
+      - Create or delete a backup selection.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+author:
+  - Kristof Imre Szabo (@krisek)
+  - Alina Buzachis (@alinabuzachis)
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+
+EXAMPLES = r"""
+- name: Create backup selection
+  amazon.aws.backup_selection:
+    selection_name: elastic
+    backup_plan_name: 1111f877-1ecf-4d79-9718-a861cd09df3b
+    iam_role_arn: arn:aws:iam::111122223333:role/system-backup
+    resources:
+      - arn:aws:elasticfilesystem:*:*:file-system/*
+"""
+
+
+RETURN = r"""
+backup_selection:
+  description: Backup selection details.
+  returned: always
+  type: complex
+  contains:
+    backup_plan_id:
+      description: Backup plan id.
+      returned: always
+      type: str
+      sample: "1111f877-1ecf-4d79-9718-a861cd09df3b"
+    creation_date:
+      description: Backup plan creation date.
+      returned: always
+      type: str
+      sample: "2023-01-24T10:08:03.193000+01:00"
+    iam_role_arn:
+      description: The ARN of the IAM role that Backup uses.
+      returned: always
+      type: str
+      sample: "arn:aws:iam::111122223333:role/system-backup"
+    selection_id:
+      description: Backup selection id.
+      returned: always
+      type: str
+      sample: "1111c217-5d71-4a55-8728-5fc4e63d437b"
+    selection_name:
+      description: Backup selection name.
+      returned: always
+      type: str
+      sample: elastic
+    conditions:
+      description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags.
+      returned: always
+      type: dict
+      sample: {}
+    list_of_tags:
+      description: Conditions defined to assign resources to the backup plans using tags.
+      returned: always
+      type: list
+      elements: dict
+      sample: []
+    not_resources:
+      description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan.
+ returned: always + type: list + sample: [] + resources: + description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan. + returned: always + type: list + sample: [] +""" + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def check_for_update(current_selection, backup_selection_data, iam_role_arn): + update_needed = False + if current_selection[0].get("IamRoleArn", None) != iam_role_arn: + update_needed = True + + fields_to_check = ["Resources", "ListOfTags", "NotResources", "Conditions"] + for field_name in fields_to_check: + field_value_from_aws = json.dumps(current_selection[0].get(field_name, []), sort_keys=True) + new_field_value = json.dumps(backup_selection_data.get(field_name, []), sort_keys=True) + if new_field_value != field_value_from_aws: + if field_name != "Conditions": + update_needed = True + elif not ( # Check that Conditions values are not both empty + field_value_from_aws + == '{"StringEquals": [], "StringLike": [], "StringNotEquals": [], "StringNotLike": []}' # Default AWS Conditions return value + and new_field_value == "[]" + ): + update_needed = True + return update_needed + + +def main(): + argument_spec = dict( + backup_selection_name=dict(type="str", required=True, aliases=["selection_name"]), + backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), + iam_role_arn=dict(type="str"), + resources=dict(type="list", elements="str"), + conditions=dict( + type="dict", + options=dict( + string_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + ), + ), + not_resources=dict(type="list", elements="str"), + list_of_tags=dict( + type="list", + elements="dict", + options=dict( + condition_type=dict(type="str"), + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + state=dict(default="present", choices=["present", "absent"]), + ) + required_if = [ + ("state", "present", ["backup_selection_name", "backup_plan_name", "iam_role_arn"]), + ("state", "absent", ["backup_selection_name", "backup_plan_name"]), + ] + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + state = module.params.get("state") + backup_selection_name = module.params.get("selection_name") + backup_plan_name = module.params.get("backup_plan_name") + 
iam_role_arn = module.params.get("iam_role_arn")
+    resources = module.params.get("resources")
+    list_of_tags = module.params.get("list_of_tags")
+    not_resources = module.params.get("not_resources")
+    conditions = module.params.get("conditions")
+
+    try:
+        client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+    results = {"changed": False, "exists": False, "backup_selection": {}}
+
+    current_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
+    results["current_selection"] = current_selection
+
+    if state == "present":
+        # build data specified by user
+        update_needed = False
+        backup_selection_data = {"SelectionName": backup_selection_name, "IamRoleArn": iam_role_arn}
+        if resources:
+            backup_selection_data["Resources"] = resources
+        if list_of_tags:
+            backup_selection_data["ListOfTags"] = snake_dict_to_camel_dict(list_of_tags, capitalize_first=True)
+        if not_resources:
+            backup_selection_data["NotResources"] = not_resources
+        if conditions:
+            backup_selection_data["Conditions"] = snake_dict_to_camel_dict(conditions, capitalize_first=True)
+
+        if current_selection:
+            results["exists"] = True
+            update_needed = check_for_update(current_selection, backup_selection_data, iam_role_arn)
+            if update_needed:
+                if module.check_mode:
+                    results["changed"] = True
+                    module.exit_json(**results, msg="Would have updated selection if not in check mode")
+
+                try:
+                    client.delete_backup_selection(
+                        aws_retry=True,
+                        SelectionId=current_selection[0]["SelectionId"],
+                        BackupPlanId=current_selection[0]["BackupPlanId"],
+                    )
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, msg="Failed to delete selection")
+            elif not update_needed:
+                results["exists"] = True
+        # state is present but the backup selection doesn't exist (or needs to be recreated)
+        if not current_selection or update_needed:
+            results["changed"] = True
+            results["exists"] = True
+            plan = get_plan_details(module, client, backup_plan_name)
+
+            if module.check_mode:
+                module.exit_json(**results, msg="Would have created selection if not in check mode")
+            try:
+                client.create_backup_selection(
+                    BackupSelection=backup_selection_data, BackupPlanId=plan[0]["backup_plan_id"]
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to create selection")
+
+            new_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
+            results["backup_selection"] = camel_dict_to_snake_dict(*new_selection)
+
+    elif state == "absent":
+        if current_selection:
+            results["changed"] = True
+            if module.check_mode:
+                module.exit_json(**results, msg="Would have deleted backup selection if not in check mode")
+            try:
+                client.delete_backup_selection(
+                    aws_retry=True,
+                    SelectionId=current_selection[0]["SelectionId"],
+                    BackupPlanId=current_selection[0]["BackupPlanId"],
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to delete selection")
+
+    module.exit_json(**results)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py
new file mode 100644
index 000000000..e9362e2ac
--- /dev/null
+++
b/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+DOCUMENTATION = r"""
+---
+module: backup_selection_info
+version_added: 6.0.0
+short_description: Describe AWS Backup Selections
+description:
+  - Lists info about Backup Selection configuration for a given Backup Plan.
+author:
+  - Gomathi Selvi Srinivasan (@GomathiselviS)
+  - Kristof Imre Szabo (@krisek)
+  - Alina Buzachis (@alinabuzachis)
+options:
+  backup_plan_name:
+    description:
+      - Uniquely identifies the backup plan to be associated with the selection of resources.
    required: true
+    type: str
+    aliases:
+      - plan_name
+  backup_selection_names:
+    description:
+      - Specifies the names of the backup selections to list.
+      - If not specified, all selections associated with the backup plan are returned.
+    type: list
+    elements: str
+    aliases:
+      - selection_names
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Gather information about all backup selections
+  amazon.aws.backup_selection_info:
+    backup_plan_name: "{{ backup_plan_name }}"
+
+- name: Gather information about a particular backup selection
+  amazon.aws.backup_selection_info:
+    backup_plan_name: "{{ backup_plan_name }}"
+    backup_selection_names:
+      - "{{ backup_selection_name }}"
+"""
+
+RETURN = r"""
+backup_selections:
+  description: List of backup selection objects. Each element consists of a dict with all the information related to that backup selection.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    backup_plan_id:
+      description: Backup plan id.
+      returned: always
+      type: str
+      sample: "1111f877-1ecf-4d79-9718-a861cd09df3b"
+    creation_date:
+      description: Backup plan creation date.
+      returned: always
+      type: str
+      sample: "2023-01-24T10:08:03.193000+01:00"
+    iam_role_arn:
+      description: IAM role arn.
+      returned: always
+      type: str
+      sample: "arn:aws:iam::111122223333:role/system-backup"
+    selection_id:
+      description: Backup selection id.
+      returned: always
+      type: str
+      sample: "1111c217-5d71-4a55-8728-5fc4e63d437b"
+    selection_name:
+      description: Backup selection name.
+      returned: always
+      type: str
+      sample: elastic
+    conditions:
+      description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags.
+      returned: always
+      type: dict
+      sample: {}
+    list_of_tags:
+      description: Conditions defined to assign resources to the backup plans using tags.
+      returned: always
+      type: list
+      elements: dict
+      sample: []
+    not_resources:
+      description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan.
+      returned: always
+      type: list
+      sample: []
+    resources:
+      description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan.
+      returned: always
+      type: list
+      sample: []
+"""
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
+def main():
+    argument_spec = dict(
+        backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]),
+        backup_selection_names=dict(type="list", elements="str", aliases=["selection_names"]),
+    )
+    result = {}
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    try:
+        client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+    result["backup_selections"] = get_selection_details(
+        module, client, module.params.get("backup_plan_name"), module.params.get("backup_selection_names")
+    )
+    module.exit_json(**camel_dict_to_snake_dict(result))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_tag.py b/ansible_collections/amazon/aws/plugins/modules/backup_tag.py
new file mode 100644
index 000000000..c06d5666e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/backup_tag.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: backup_tag
+version_added: 6.0.0
+short_description: Manage tags on backup plan, backup vault, recovery point
+description:
+  - Create, list, update, and remove tags on AWS backup resources such as backup plan, backup vault, and recovery point.
+  - Resources are referenced using ARN.
+author:
+  - Mandar Vijay Kulkarni (@mandar242)
+options:
+  resource:
+    description:
+      - The Amazon Resource Name (ARN) of the backup resource.
+    required: true
+    type: str
+  state:
+    description:
+      - Whether the tags should be present or absent on the resource.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  tags:
+    description:
+      - A dictionary of tags to add or remove from the resource.
+      - If the value provided for a tag key is null and I(state=absent), the tag will be removed regardless of its current value.
+    type: dict
+    required: true
+    aliases: ['resource_tags']
+  purge_tags:
+    description:
+      - Whether unspecified tags should be removed from the resource.
+      - Note that when combined with I(state=absent), specified tag keys are not purged regardless of their current values.
+    type: bool
+    default: false
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
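+
+# The task below is an illustrative sketch rather than one of the module's
+# official examples; the ARN variable and the tag value are placeholders. It
+# shows how registering the result exposes the documented added_tags and
+# removed_tags return values.
+- name: Add a tag and capture what changed
+  amazon.aws.backup_tag:
+    resource: "{{ backup_resource_arn }}"
+    state: present
+    tags:
+      owner: backup-team
+  register: tag_result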
+ +- name: Add tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: present + tags: + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + +- name: Remove only specified tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: + CamelCaseKey: CamelCaseValue + +- name: Remove all tags except for specified tags + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + purge_tags: true + +- name: Update value of tag key on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: present + tags: + test_tag_key_1: tag_tag_value_NEW_1 + +- name: Remove all of the tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: {} + purge_tags: true +""" + +RETURN = r""" +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: When tags are added to the resource + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: When tags are removed from the resource + type: dict +""" + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def manage_tags(module, backup_client): + result = {"changed": False} + + resource = module.params.get("resource") + tags = module.params.get("tags") + state = module.params.get("state") + purge_tags = module.params.get("purge_tags") + + current_tags = get_backup_resource_tags(module, backup_client, resource) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + remove_tags = {} + if state == "absent": + for key in tags: + if purge_tags is False and key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + remove_tags[key] = current_tags[key] + + for key in tags_to_remove: + remove_tags[key] = current_tags[key] + + if remove_tags: + result["changed"] = True + result["removed_tags"] = remove_tags + if not module.check_mode: + try: + backup_client.untag_resource(ResourceArn=resource, TagKeyList=list(remove_tags.keys())) + except (BotoCoreError, ClientError) as remove_tag_error: + module.fail_json_aws( + remove_tag_error, + msg=f"Failed to remove tags {remove_tags} from resource {resource}", + ) + + if state == "present" and tags_to_add: + result["changed"] = True + result["added_tags"] = tags_to_add + if not module.check_mode: + try: + backup_client.tag_resource(ResourceArn=resource, Tags=tags_to_add) + except (BotoCoreError, ClientError) as set_tag_error: + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {tags_to_add} on resource {resource}") + + result["tags"] = get_backup_resource_tags(module, backup_client, resource) + return result + + +def main(): + argument_spec = dict( + state=dict(default="present", choices=["present", "absent"]), + resource=dict(required=True, 
type="str"), + tags=dict(required=True, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=False, type="bool"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + backup_client = module.client("backup") + + result = {} + + result = manage_tags(module, backup_client) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py new file mode 100644 index 000000000..91bd375ed --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: backup_tag_info +version_added: 6.0.0 +short_description: List tags on AWS Backup resources +description: + - List tags on AWS backup resources such as backup plan, backup vault, and recovery point. + - Resources are referenced using ARN. +author: + - Mandar Vijay Kulkarni (@mandar242) +options: + resource: + description: + - The Amazon Resource Name (ARN) of the backup resource. + required: true + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: List tags on a resource + amazon.aws.backup_tag_info: + resource: "{{ backup_resource_arn }}" +""" + +RETURN = r""" +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +""" + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def main(): + argument_spec = dict( + resource=dict(required=True, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + backup_client = module.client("backup") + + current_tags = get_backup_resource_tags(module, backup_client, module.params["resource"]) + + module.exit_json(changed=False, tags=current_tags) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_vault.py b/ansible_collections/amazon/aws/plugins/modules/backup_vault.py new file mode 100644 index 000000000..7fd2cb939 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_vault.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_vault +version_added: 6.0.0 +short_description: Manage AWS Backup Vaults +description: + - Creates, deletes, or lists Backup Vault configuration. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) +options: + state: + description: + - Add or remove Backup Vault configuration. + type: str + choices: ['present', 'absent'] + default: present + backup_vault_name: + description: + - Name for the Backup Vault. + - Names are unique to the account used to create them and the Amazon Web Services Region where they are created. + - They consist of letters, numbers, and hyphens. 
+    type: str
+    required: true
+  encryption_key_arn:
+    description:
+      - The server-side encryption key that is used to protect the backups.
+    type: str
+  creator_request_id:
+    description:
+      - A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice.
+      - If used, this parameter must contain 1 to 50 alphanumeric or "-_." characters.
+    type: str
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+  - amazon.aws.tags
+"""
+
+EXAMPLES = r"""
+- name: create backup vault
+  amazon.aws.backup_vault:
+    state: present
+    backup_vault_name: default-vault
+    encryption_key_arn: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+    tags:
+      environment: dev
+      Name: default
+"""
+
+RETURN = r"""
+exists:
+    description: Whether the resource exists.
+    returned: always
+    type: bool
+    sample: true
+backup_vault:
+    description: BackupVault resource details.
+    returned: always
+    type: complex
+    sample: hash/dictionary of values
+    contains:
+        backup_vault_name:
+            description: The name of a logical container where backups are stored.
+            returned: success
+            type: str
+            sample: default-name
+        backup_vault_arn:
+            description: An Amazon Resource Name (ARN) that uniquely identifies a backup vault.
+            returned: success
+            type: str
+            sample: arn:aws:backup:us-east-1:123456789012:vault:aBackupVault
+        creation_date:
+            description: The date and time a backup vault is created, in Unix format and Coordinated Universal Time (UTC).
+            returned: success
+            type: str
+            sample: 1516925490.087 (represents Friday, January 26, 2018 12:11:30.087 AM).
+        tags:
+            description: hash/dictionary of tags applied to this resource
+            returned: success
+            type: dict
+            sample: {'environment': 'dev', 'Name': 'default'}
+"""
+
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+try:
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+
+def create_backup_vault(module, client, params):
+    """
+    Creates a Backup Vault
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    params : The parameters to create a backup vault
+    """
+    resp = {}
+    params = {k: v for k, v in params.items() if v is not None}
+    try:
+        resp = client.create_backup_vault(**params)
+    except (
+        BotoCoreError,
+        ClientError,
+    ) as err:
+        module.fail_json_aws(err, msg="Failed to create Backup Vault")
+    return resp
+
+
+def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True):
+    """
+    Creates, updates, and/or removes tags on a Backup Vault resource.
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    tags : Dict of tags to apply to the vault
+    vault_arn : The ARN of the Backup Vault to operate on
+    curr_tags : Dict of the current tags on the resource, if any
+    purge_tags : Whether current tags missing from I(tags) should be removed
+    """
+
+    if tags is None:
+        return False
+
+    curr_tags = curr_tags or {}
+    tags_to_add, tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags)
+
+    if not tags_to_add and not tags_to_remove:
+        return False
+
+    if module.check_mode:
+        return True
+
+    if tags_to_remove:
+        try:
+            client.untag_resource(ResourceArn=vault_arn, TagKeyList=tags_to_remove)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to remove tags from the vault")
+
+    if tags_to_add:
+        try:
+            client.tag_resource(ResourceArn=vault_arn, Tags=tags_to_add)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to add tags to Vault")
+
+    return True
+
+
+def get_vault_facts(module, client, vault_name):
+    """
+    Describes an existing vault in an account.
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    vault_name : Name of the backup vault
+    """
+    resp = None
+    # get Backup Vault info
+    try:
+        resp = client.describe_backup_vault(BackupVaultName=vault_name)
+    except is_boto3_error_code("AccessDeniedException"):
+        module.warn("Access Denied trying to describe backup vault")
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Unable to get vault facts")
+
+    # Now check to see if our vault exists and get status and tags
+    if resp:
+        if resp.get("BackupVaultArn"):
+            resource = resp.get("BackupVaultArn")
+            resp["tags"] = get_backup_resource_tags(module, client, resource)
+
+        # Check for non-existent values and populate with None
+        optional_vals = set(
+            [
+                "S3KeyPrefix",
+                "SnsTopicName",
+                "SnsTopicARN",
+                "CloudWatchLogsLogGroupArn",
+                "CloudWatchLogsRoleArn",
+                "KmsKeyId",
+            ]
+        )
+        for v in optional_vals - set(resp.keys()):
+            resp[v] = None
+        return resp
+
+    else:
+        # vault doesn't exist return None
+        return None
+
+
+def delete_backup_vault(module, client, vault_name):
+    """
+    Delete a Backup Vault
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    vault_name : Backup Vault Name
+    """
+    try:
+        client.delete_backup_vault(BackupVaultName=vault_name)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to delete the Backup Vault")
+
+
+def main():
+    argument_spec = dict(
+        state=dict(default="present", choices=["present", "absent"]),
+        backup_vault_name=dict(required=True, type="str"),
+        encryption_key_arn=dict(type="str", no_log=False),
+        creator_request_id=dict(type="str"),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(default=True, type="bool"),
+    )
+
+    required_if = [
+        ("state", "present", ["backup_vault_name"]),
+        ("state", "enabled", ["backup_vault_name"]),
+    ]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+    # collect parameters
+    if module.params["state"] in ("present", "enabled"):
+        state = "present"
+    elif module.params["state"] in ("absent", "disabled"):
+        state = "absent"
+    tags = module.params["tags"]
+    purge_tags = module.params["purge_tags"]
+    ct_params = dict(
+        BackupVaultName=module.params["backup_vault_name"],
+        BackupVaultTags=module.params["tags"],
+        EncryptionKeyArn=module.params["encryption_key_arn"],
+        CreatorRequestId=module.params["creator_request_id"],
+    )
+
+    client = module.client("backup")
+    results = dict(changed=False, exists=False)
+
+    # Get existing backup vault facts
+    try:
+        vault = get_vault_facts(module, client, ct_params["BackupVaultName"])
+    except (BotoCoreError, ClientError) as err:
+        vault = None
+        module.debug(f"Unable to get vault facts {err}")
+
+    # If the vault exists set the result exists variable
+    if vault is not None:
+        results["exists"] = True
+
+    if state == "absent" and results["exists"]:
+        # If the vault exists, go ahead and delete it
+        results["changed"] = True
+        results["exists"] = False
+        results["backupvault"] = dict()
+        if not module.check_mode:
+            delete_backup_vault(module, client, vault["BackupVaultName"])
+
+    elif state == "present" and not results["exists"]:
+        # Backup Vault doesn't exist just go create it
+        results["changed"] = True
+        results["exists"] = True
+        if not module.check_mode:
+            if tags:
+                ct_params["BackupVaultTags"] = tags
+            # If we aren't in check_mode then actually create it
+            create_backup_vault(module, client, ct_params)
+
+            # Get facts for newly created Backup Vault
+            vault = get_vault_facts(module, client, ct_params["BackupVaultName"])
+
+        # If we are in check mode create a fake return structure for the newly created vault
+        if module.check_mode:
+            vault = dict()
+            vault.update(ct_params)
+            vault["EncryptionKeyArn"] = ""
+            vault["tags"] = tags
+
+    elif state == "present" and results["exists"]:
+        # Check if we need to update tags on resource
+        tags_changed = tag_vault(
+            module,
+            client,
+            tags=tags,
+            vault_arn=vault["BackupVaultArn"],
+            curr_tags=vault["tags"],
+            purge_tags=purge_tags,
+        )
+        if tags_changed:
+            updated_tags = dict()
+            if not purge_tags:
+                updated_tags = vault["tags"]
+            updated_tags.update(tags)
+            results["changed"] = True
+            vault["tags"] = updated_tags
+
+    # Populate backup vault facts in output
+
+    if vault:
+        results["vault"] = camel_dict_to_snake_dict(vault, ignore_list=["tags"])
+    module.exit_json(**results)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py
new file mode 100644
index 000000000..3f186a883
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+DOCUMENTATION = r"""
+---
+module: backup_vault_info
+version_added: 6.0.0
+short_description: Describe AWS Backup Vaults
+description:
+  - Lists info about Backup Vault configuration.
+author:
+  - Gomathi Selvi Srinivasan (@GomathiselviS)
+options:
+  backup_vault_names:
+    type: list
+    elements: str
+    default: []
+    description:
+      - Specifies a list of vault names.
+      - If an empty list is specified, information for the backup vaults in the current region is returned.
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all backup vaults
+- amazon.aws.backup_vault_info
+
+# Gather information about a particular backup vault
+- amazon.aws.backup_vault_info:
+    backup_vault_names:
+      - "arn:aws:backup_vault:us-east-2:123456789012:backup_vault/defaultvault"
+"""
+
+RETURN = r"""
+backup_vaults:
+    description: List of backup vault objects. Each element consists of a dict with all the information related to that backup vault.
+    type: list
+    elements: dict
+    returned: always
+    contains:
+        backup_vault_name:
+            description: Name of the backup vault.
+            type: str
+            sample: "default vault"
+        backup_vault_arn:
+            description: ARN of the backup vault.
+ type: str + sample: "arn:aws:backup:us-west-2:111122223333:vault/1234abcd-12ab-34cd-56ef-1234567890ab" + encryption_key_arn: + description: The server-side encryption key that is used to protect the backups. + type: str + sample: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + creation_date: + description: The date and time a backup vault is created, in Unix format and Coordinated Universal Time (UTC). + type: str + sample: "1516925490.087 (represents Friday, January 26, 2018 12:11:30.087 AM)." + creator_request_id: + description: + - A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. + type: str + number_of_recovery_points: + description: The number of recovery points that are stored in a backup vault. + type: int + locked: + description: + - Indicates whether Backup Vault Lock is currently protecting the backup vault. + - True means that Vault Lock causes delete or update operations on the recovery points stored in the vault to fail. + type: bool + sample: true + min_retention_days: + description: + - The minimum retention period that the vault retains its recovery points. + - If this parameter is not specified, Vault Lock does not enforce a minimum retention period. + type: int + sample: 120 + max_retention_days: + description: + - The maximum retention period that the vault retains its recovery points. + - If this parameter is not specified, Vault Lock does not enforce a maximum retention period (allowing indefinite storage). + type: int + sample: 123 + lock_date: + description: The date and time when Backup Vault Lock configuration cannot be changed or deleted. + type: str + sample: "1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM." 
+ +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + + +def get_backup_vaults(connection, module): + all_backup_vaults = [] + try: + result = connection.get_paginator("list_backup_vaults") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get the backup vaults.") + for backup_vault in result.paginate(): + all_backup_vaults.extend(list_backup_vaults(backup_vault)) + return all_backup_vaults + + +def list_backup_vaults(backup_vault_dict): + return [x["BackupVaultName"] for x in backup_vault_dict["BackupVaultList"]] + + +def get_backup_vault_detail(connection, module): + output = [] + result = {} + backup_vault_name_list = module.params.get("backup_vault_names") + if not backup_vault_name_list: + backup_vault_name_list = get_backup_vaults(connection, module) + for name in backup_vault_name_list: + try: + output.append(connection.describe_backup_vault(BackupVaultName=name, aws_retry=True)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"Failed to describe vault {name}") + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_backup_vault = [] + for backup_vault in output: + try: + resource = backup_vault.get("BackupVaultArn", None) + tag_dict = get_backup_resource_tags(module, connection, resource) + backup_vault.update({"tags": tag_dict}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.warn(f"Failed to get the backup vault tags - {e}") + snaked_backup_vault.append(camel_dict_to_snake_dict(backup_vault)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for v in snaked_backup_vault: + if "tags_list" in v: + v["tags"] = boto3_tag_list_to_ansible_dict(v["tags_list"], "key", "value") + del v["tags_list"] + if "response_metadata" in v: + del v["response_metadata"] + result["backup_vaults"] = snaked_backup_vault + return result + + +def main(): + argument_spec = dict( + backup_vault_names=dict(type="list", elements="str", default=[]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + connection = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + result = get_backup_vault_detail(connection, module) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py index f953a75d2..ae2e78068 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudformation version_added: 1.0.0 @@ -163,12 +160,12 @@ options: author: - "James S. Martin (@jsmartin)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: create a cloudformation stack amazon.aws.cloudformation: stack_name: "ansible-cloudformation" @@ -244,10 +241,10 @@ EXAMPLES = ''' template: "files/cloudformation-example.json" template_parameters: DBSnapshotIdentifier: - use_previous_value: True + use_previous_value: true value: arn:aws:rds:es-east-1:123456789012:snapshot:rds:my-db-snapshot DBName: - use_previous_value: True + use_previous_value: true tags: Stack: "ansible-cloudformation" @@ -280,14 +277,17 @@ EXAMPLES = ''' state: present template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template on_create_failure: DELETE -''' +""" -RETURN = ''' +RETURN = r""" events: type: list description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. returned: always - sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"] + sample: [ + "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", + "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" + ] log: description: Debugging logs. Useful when modifying or finding an error. returned: always @@ -317,7 +317,7 @@ stack_outputs: description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary. returned: state == present sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"} -''' # NOQA +""" import json import time @@ -333,11 +333,11 @@ except ImportError: from ansible.module_utils._text import to_bytes from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list # Set a default, mostly for our integration tests. This will be overridden in # the main() loop to match the parameters we're passed @@ -345,63 +345,65 @@ retry_decorator = AWSRetry.jittered_backoff() def get_stack_events(cfn, stack_name, events_limit, token_filter=None): - '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.''' - ret = {'events': [], 'log': []} + """This event data was never correct, it worked as a side effect. 
So the v2.3 format is different.""" + ret = {"events": [], "log": []} try: - pg = cfn.get_paginator( - 'describe_stack_events' - ).paginate( - StackName=stack_name, - PaginationConfig={'MaxItems': events_limit} + pg = cfn.get_paginator("describe_stack_events").paginate( + StackName=stack_name, PaginationConfig={"MaxItems": events_limit} ) if token_filter is not None: - events = list(retry_decorator(pg.search)( - "StackEvents[?ClientRequestToken == '{0}']".format(token_filter) - )) + events = list(retry_decorator(pg.search)(f"StackEvents[?ClientRequestToken == '{token_filter}']")) else: events = list(pg.search("StackEvents[*]")) - except is_boto3_error_message('does not exist'): - ret['log'].append('Stack does not exist.') + except is_boto3_error_message("does not exist"): + ret["log"].append("Stack does not exist.") return ret - except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ValidationError, + botocore.exceptions.ClientError, + ) as err: # pylint: disable=duplicate-except error_msg = boto_exception(err) - ret['log'].append('Unknown error: ' + str(error_msg)) + ret["log"].append("Unknown error: " + str(error_msg)) return ret for e in events: - eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) - ret['events'].append(eventline) + eventline = f"StackEvent {e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}" + ret["events"].append(eventline) - if e['ResourceStatus'].endswith('FAILED'): - failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) - ret['log'].append(failline) + if e["ResourceStatus"].endswith("FAILED"): + failure = f"{e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}: {e['ResourceStatusReason']}" + ret["log"].append(failure) return ret def create_stack(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: - module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.") + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: + module.fail_json( + msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." + ) - # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and + # 'TimeoutInMinutes', 'EnableTerminationProtection' and # 'OnFailure' only apply on creation, not update. 
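The rewritten get_stack_events() above leans on botocore's PageIterator.search(), which lazily evaluates a JMESPath expression across every page returned by describe_stack_events. A minimal standalone sketch of that pattern (the stack name and token below are placeholders, not values from this patch):

import boto3

cfn = boto3.client("cloudformation")
pages = cfn.get_paginator("describe_stack_events").paginate(
    StackName="example-stack", PaginationConfig={"MaxItems": 200}
)
# Filtering on ClientRequestToken limits the log to events produced by
# one specific create/update/delete request, as the module does.
token = "example-token"
for event in pages.search(f"StackEvents[?ClientRequestToken == '{token}']"):
    print(event["ResourceType"], event["LogicalResourceId"], event["ResourceStatus"])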
- if module.params.get('on_create_failure') is not None: - stack_params['OnFailure'] = module.params['on_create_failure'] + if module.params.get("on_create_failure") is not None: + stack_params["OnFailure"] = module.params["on_create_failure"] else: - stack_params['DisableRollback'] = module.params['disable_rollback'] + stack_params["DisableRollback"] = module.params["disable_rollback"] - if module.params.get('create_timeout') is not None: - stack_params['TimeoutInMinutes'] = module.params['create_timeout'] - if module.params.get('termination_protection') is not None: - stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection')) + if module.params.get("create_timeout") is not None: + stack_params["TimeoutInMinutes"] = module.params["create_timeout"] + if module.params.get("termination_protection") is not None: + stack_params["EnableTerminationProtection"] = bool(module.params.get("termination_protection")) try: response = cfn.create_stack(aws_retry=True, **stack_params) # Use stack ID to follow stack state in case of on_create_failure = DELETE - result = stack_operation(module, cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None)) + result = stack_operation( + module, cfn, response["StackId"], "CREATE", events_limit, stack_params.get("ClientRequestToken", None) + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName'))) + module.fail_json_aws(err, msg=f"Failed to create stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result @@ -409,43 +411,47 @@ def create_stack(module, stack_params, cfn, events_limit): def list_changesets(cfn, stack_name): res = cfn.list_change_sets(aws_retry=True, StackName=stack_name) - return [cs['ChangeSetName'] for cs in res['Summaries']] + return [cs["ChangeSetName"] for cs in res["Summaries"]] def create_changeset(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: module.fail_json(msg="Either 'template' or 'template_url' is required.") - if module.params['changeset_name'] is not None: - stack_params['ChangeSetName'] = module.params['changeset_name'] + if module.params["changeset_name"] is not None: + stack_params["ChangeSetName"] = module.params["changeset_name"] # changesets don't accept ClientRequestToken parameters - stack_params.pop('ClientRequestToken', None) + stack_params.pop("ClientRequestToken", None) try: changeset_name = build_changeset_name(stack_params) - stack_params['ChangeSetName'] = changeset_name + stack_params["ChangeSetName"] = changeset_name # Determine if this changeset already exists - pending_changesets = list_changesets(cfn, stack_params['StackName']) + pending_changesets = list_changesets(cfn, stack_params["StackName"]) if changeset_name in pending_changesets: - warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets) - result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning]) + warning = f"WARNING: {len(pending_changesets)} pending changeset(s) exist(s) for this stack!" 
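create_changeset() polls describe_change_set() in a hand-rolled loop with a ten-minute ceiling, as shown just below. For comparison only, botocore also ships a change_set_create_complete waiter that performs an equivalent wait; a hedged sketch with placeholder names, not what this patch uses:

import boto3
from botocore.exceptions import WaiterError

cfn = boto3.client("cloudformation")
waiter = cfn.get_waiter("change_set_create_complete")
try:
    waiter.wait(
        StackName="example-stack",
        ChangeSetName="example-changeset",
        WaiterConfig={"Delay": 5, "MaxAttempts": 120},  # roughly the module's budget
    )
except WaiterError:
    # A change set with nothing to change ends in FAILED with a
    # "didn't contain changes" reason; the module treats that case as
    # "already up to date" rather than as an error.
    pass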
+ result = dict(changed=False, output=f"ChangeSet {changeset_name} already exists.", warnings=[warning]) else: cs = cfn.create_change_set(aws_retry=True, **stack_params) # Make sure we don't enter an infinite loop time_end = time.time() + 600 while time.time() < time_end: try: - newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs['Id']) + newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs["Id"]) except botocore.exceptions.BotoCoreError as err: module.fail_json_aws(err) - if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS': + if newcs["Status"] == "CREATE_PENDING" or newcs["Status"] == "CREATE_IN_PROGRESS": time.sleep(1) - elif newcs['Status'] == 'FAILED' and ("The submitted information didn't contain changes" in newcs['StatusReason'] - or "No updates are to be performed" in newcs['StatusReason']): - cfn.delete_change_set(aws_retry=True, ChangeSetName=cs['Id']) - result = dict(changed=False, - output='The created Change Set did not contain any changes to this stack and was deleted.') + elif newcs["Status"] == "FAILED" and ( + "The submitted information didn't contain changes" in newcs["StatusReason"] + or "No updates are to be performed" in newcs["StatusReason"] + ): + cfn.delete_change_set(aws_retry=True, ChangeSetName=cs["Id"]) + result = dict( + changed=False, + output="The created Change Set did not contain any changes to this stack and was deleted.", + ) # a failed change set does not trigger any stack events so we just want to # skip any further processing of result and just return it directly return result @@ -453,15 +459,17 @@ def create_changeset(module, stack_params, cfn, events_limit): break # Lets not hog the cpu/spam the AWS API time.sleep(1) - result = stack_operation(module, cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit) - result['change_set_id'] = cs['Id'] - result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']), - 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'], - 'NOTE that dependencies on this stack might fail due to pending changes!'] - except is_boto3_error_message('No updates are to be performed.'): - result = dict(changed=False, output='Stack is already up-to-date.') + result = stack_operation(module, cfn, stack_params["StackName"], "CREATE_CHANGESET", events_limit) + result["change_set_id"] = cs["Id"] + result["warnings"] = [ + f"Created changeset named {changeset_name} for stack {stack_params['StackName']}", + f"You can execute it using: aws cloudformation execute-change-set --change-set-name {cs['Id']}", + "NOTE that dependencies on this stack might fail due to pending changes!", + ] + except is_boto3_error_message("No updates are to be performed."): + result = dict(changed=False, output="Stack is already up-to-date.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg='Failed to create change set') + module.fail_json_aws(err, msg="Failed to create change set") if not result: module.fail_json(msg="empty result") @@ -469,127 +477,137 @@ def create_changeset(module, stack_params, cfn, events_limit): def update_stack(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: - stack_params['UsePreviousTemplate'] = True + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: + stack_params["UsePreviousTemplate"] = True + + if 
module.params["stack_policy_on_update_body"] is not None: + stack_params["StackPolicyDuringUpdateBody"] = module.params["stack_policy_on_update_body"] - if module.params['stack_policy_on_update_body'] is not None: - stack_params['StackPolicyDuringUpdateBody'] = module.params['stack_policy_on_update_body'] + stack_params["DisableRollback"] = module.params["disable_rollback"] # if the state is present and the stack already exists, we try to update it. # AWS will tell us if the stack template and parameters are the same and # don't need to be updated. try: cfn.update_stack(aws_retry=True, **stack_params) - result = stack_operation(module, cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None)) - except is_boto3_error_message('No updates are to be performed.'): - result = dict(changed=False, output='Stack is already up-to-date.') + result = stack_operation( + module, cfn, stack_params["StackName"], "UPDATE", events_limit, stack_params.get("ClientRequestToken", None) + ) + except is_boto3_error_message("No updates are to be performed."): + result = dict(changed=False, output="Stack is already up-to-date.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName'))) + module.fail_json_aws(err, msg=f"Failed to update stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): - '''updates termination protection of a stack''' + """updates termination protection of a stack""" stack = get_stack_facts(module, cfn, stack_name) if stack: - if stack['EnableTerminationProtection'] is not desired_termination_protection_state: + if stack["EnableTerminationProtection"] is not desired_termination_protection_state: try: cfn.update_termination_protection( aws_retry=True, EnableTerminationProtection=desired_termination_protection_state, - StackName=stack_name) + StackName=stack_name, + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=None): - '''gets the status of a stack while it is created/updated/deleted''' + """gets the status of a stack while it is created/updated/deleted""" existed = [] while True: try: stack = get_stack_facts(module, cfn, stack_name, raise_errors=True) - existed.append('yes') + existed.append("yes") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError): # If the stack previously existed, and now can't be found then it's # been deleted successfully. - if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + if "yes" in existed or operation == "DELETE": # stacks may delete fast, look in a few ways. ret = get_stack_events(cfn, stack_name, events_limit, op_token) - ret.update({'changed': True, 'output': 'Stack Deleted'}) + ret.update({"changed": True, "output": "Stack Deleted"}) return ret else: - return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + return { + "changed": True, + "failed": True, + "output": "Stack Not Found", + "exception": traceback.format_exc(), + } ret = get_stack_events(cfn, stack_name, events_limit, op_token) if not stack: - if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. 
+ if "yes" in existed or operation == "DELETE": # stacks may delete fast, look in a few ways. ret = get_stack_events(cfn, stack_name, events_limit, op_token) - ret.update({'changed': True, 'output': 'Stack Deleted'}) + ret.update({"changed": True, "output": "Stack Deleted"}) return ret else: - ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'}) + ret.update({"changed": False, "failed": True, "output": "Stack not found."}) return ret # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 - elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET': - ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation}) + elif stack["StackStatus"].endswith("ROLLBACK_COMPLETE") and operation != "CREATE_CHANGESET": + ret.update({"changed": True, "failed": True, "output": f"Problem with {operation}. Rollback complete"}) return ret - elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE': - ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'}) + elif stack["StackStatus"] == "DELETE_COMPLETE" and operation == "CREATE": + ret.update({"changed": True, "failed": True, "output": "Stack create failed. Delete complete."}) return ret # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. - elif stack['StackStatus'].endswith('_COMPLETE'): - ret.update({'changed': True, 'output': 'Stack %s complete' % operation}) + elif stack["StackStatus"].endswith("_COMPLETE"): + ret.update({"changed": True, "output": f"Stack {operation} complete"}) return ret - elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): - ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + elif stack["StackStatus"].endswith("_ROLLBACK_FAILED"): + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} rollback failed"}) return ret # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
- elif stack['StackStatus'].endswith('_FAILED'): - ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + elif stack["StackStatus"].endswith("_FAILED"): + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} failed"}) return ret else: # this can loop forever :/ time.sleep(5) - return {'failed': True, 'output': 'Failed for unknown reasons.'} + return {"failed": True, "output": "Failed for unknown reasons."} def build_changeset_name(stack_params): - if 'ChangeSetName' in stack_params: - return stack_params['ChangeSetName'] + if "ChangeSetName" in stack_params: + return stack_params["ChangeSetName"] json_params = json.dumps(stack_params, sort_keys=True) - return 'Ansible-{0}-{1}'.format( - stack_params['StackName'], - sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() - ) + changeset_sha = sha1(to_bytes(json_params, errors="surrogate_or_strict")).hexdigest() + return f"Ansible-{stack_params['StackName']}-{changeset_sha}" def check_mode_changeset(module, stack_params, cfn): """Create a change set, describe it and delete it before returning check mode outputs.""" - stack_params['ChangeSetName'] = build_changeset_name(stack_params) + stack_params["ChangeSetName"] = build_changeset_name(stack_params) # changesets don't accept ClientRequestToken parameters - stack_params.pop('ClientRequestToken', None) + stack_params.pop("ClientRequestToken", None) try: change_set = cfn.create_change_set(aws_retry=True, **stack_params) for _i in range(60): # total time 5 min - description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set['Id']) - if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set["Id"]) + if description["Status"] in ("CREATE_COMPLETE", "FAILED"): break time.sleep(5) else: # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail - module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + module.fail_json(msg=f"Failed to create change set {stack_params['ChangeSetName']}") - cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set['Id']) + cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set["Id"]) - reason = description.get('StatusReason') + reason = description.get("StatusReason") - if description['Status'] == 'FAILED' and ("didn't contain changes" in reason or "No updates are to be performed" in reason): - return {'changed': False, 'msg': reason, 'meta': reason} - return {'changed': True, 'msg': reason, 'meta': description['Changes']} + if description["Status"] == "FAILED" and ( + "didn't contain changes" in reason or "No updates are to be performed" in reason + ): + return {"changed": False, "msg": reason, "meta": reason} + return {"changed": True, "msg": reason, "meta": description["Changes"]} except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: module.fail_json_aws(err) @@ -598,16 +616,19 @@ def check_mode_changeset(module, stack_params, cfn): def get_stack_facts(module, cfn, stack_name, raise_errors=False): try: stack_response = cfn.describe_stacks(aws_retry=True, StackName=stack_name) - stack_info = stack_response['Stacks'][0] - except is_boto3_error_message('does not exist'): + stack_info = stack_response["Stacks"][0] + except is_boto3_error_message("does not exist"): return None - except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except + except ( + 
botocore.exceptions.ValidationError, + botocore.exceptions.ClientError, + ) as err: # pylint: disable=duplicate-except if raise_errors: raise err module.fail_json_aws(err, msg="Failed to describe stack") - if stack_response and stack_response.get('Stacks', None): - stacks = stack_response['Stacks'] + if stack_response and stack_response.get("Stacks", None): + stacks = stack_response["Stacks"] if len(stacks): stack_info = stacks[0] @@ -617,178 +638,193 @@ def get_stack_facts(module, cfn, stack_name, raise_errors=False): def main(): argument_spec = dict( stack_name=dict(required=True), - template_parameters=dict(required=False, type='dict', default={}), - state=dict(default='present', choices=['present', 'absent']), - template=dict(default=None, required=False, type='path'), + template_parameters=dict(required=False, type="dict", default={}), + state=dict(default="present", choices=["present", "absent"]), + template=dict(default=None, required=False, type="path"), notification_arns=dict(default=None, required=False), stack_policy=dict(default=None, required=False), - stack_policy_body=dict(default=None, required=False, type='json'), - stack_policy_on_update_body=dict(default=None, required=False, type='json'), - disable_rollback=dict(default=False, type='bool'), - on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), - create_timeout=dict(default=None, type='int'), + stack_policy_body=dict(default=None, required=False, type="json"), + stack_policy_on_update_body=dict(default=None, required=False, type="json"), + disable_rollback=dict(default=False, type="bool"), + on_create_failure=dict(default=None, required=False, choices=["DO_NOTHING", "ROLLBACK", "DELETE"]), + create_timeout=dict(default=None, type="int"), template_url=dict(default=None, required=False), template_body=dict(default=None, required=False), - create_changeset=dict(default=False, type='bool'), + create_changeset=dict(default=False, type="bool"), changeset_name=dict(default=None, required=False), role_arn=dict(default=None, required=False), - tags=dict(default=None, type='dict'), - termination_protection=dict(default=None, type='bool'), - events_limit=dict(default=200, type='int'), - backoff_retries=dict(type='int', default=10, required=False), - backoff_delay=dict(type='int', default=3, required=False), - backoff_max_delay=dict(type='int', default=30, required=False), - capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + tags=dict(default=None, type="dict"), + termination_protection=dict(default=None, type="bool"), + events_limit=dict(default=200, type="int"), + backoff_retries=dict(type="int", default=10, required=False), + backoff_delay=dict(type="int", default=3, required=False), + backoff_max_delay=dict(type="int", default=30, required=False), + capabilities=dict(type="list", elements="str", default=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['template_url', 'template', 'template_body'], - ['disable_rollback', 'on_create_failure']], - supports_check_mode=True + mutually_exclusive=[["template_url", "template", "template_body"], ["disable_rollback", "on_create_failure"]], + supports_check_mode=True, ) invalid_capabilities = [] - user_capabilities = module.params.get('capabilities') + user_capabilities = module.params.get("capabilities") for user_cap in user_capabilities: - if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 
'CAPABILITY_AUTO_EXPAND']: + if user_cap not in ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"]: invalid_capabilities.append(user_cap) if invalid_capabilities: - module.fail_json(msg="Specified capabilities are invalid : %r," - " please check documentation for valid capabilities" % invalid_capabilities) + module.fail_json( + msg=f"Specified capabilities are invalid : {invalid_capabilities!r}, please check documentation for valid capabilities" + ) # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. stack_params = { - 'Capabilities': user_capabilities, - 'ClientRequestToken': to_native(uuid.uuid4()), + "Capabilities": user_capabilities, + "ClientRequestToken": to_native(uuid.uuid4()), } - state = module.params['state'] - stack_params['StackName'] = module.params['stack_name'] - - if module.params['template'] is not None: - with open(module.params['template'], 'r') as template_fh: - stack_params['TemplateBody'] = template_fh.read() - elif module.params['template_body'] is not None: - stack_params['TemplateBody'] = module.params['template_body'] - elif module.params['template_url'] is not None: - stack_params['TemplateURL'] = module.params['template_url'] - - if module.params.get('notification_arns'): - stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + state = module.params["state"] + stack_params["StackName"] = module.params["stack_name"] + + if module.params["template"] is not None: + with open(module.params["template"], "r") as template_fh: + stack_params["TemplateBody"] = template_fh.read() + elif module.params["template_body"] is not None: + stack_params["TemplateBody"] = module.params["template_body"] + elif module.params["template_url"] is not None: + stack_params["TemplateURL"] = module.params["template_url"] + + if module.params.get("notification_arns"): + stack_params["NotificationARNs"] = module.params["notification_arns"].split(",") else: - stack_params['NotificationARNs'] = [] + stack_params["NotificationARNs"] = [] # can't check the policy when verifying. 
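The template_parameters handling just below maps Ansible's dict form onto CloudFormation's Parameters list, where use_previous_value deliberately drops the literal value. Restated as a standalone sketch (the module inlines this loop rather than using a helper like this):

def to_cfn_parameters(template_parameters):
    params = []
    for key, value in template_parameters.items():
        if isinstance(value, dict):
            # Dict form allows additional CFN parameter attributes.
            param = {"ParameterKey": key}
            if "value" in value:
                param["ParameterValue"] = str(value["value"])
            if value.get("use_previous_value"):
                # Keep whatever value the stack already has.
                param["UsePreviousValue"] = True
                param.pop("ParameterValue", None)
            params.append(param)
        else:
            # Plain key/value pair.
            params.append({"ParameterKey": key, "ParameterValue": str(value)})
    return params

print(to_cfn_parameters({"DBName": {"use_previous_value": True}, "InstanceCount": 2}))
# [{'ParameterKey': 'DBName', 'UsePreviousValue': True},
#  {'ParameterKey': 'InstanceCount', 'ParameterValue': '2'}]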
- if module.params['stack_policy_body'] is not None and not module.check_mode and not module.params['create_changeset']: - stack_params['StackPolicyBody'] = module.params['stack_policy_body'] - elif module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: - with open(module.params['stack_policy'], 'r') as stack_policy_fh: - stack_params['StackPolicyBody'] = stack_policy_fh.read() - - template_parameters = module.params['template_parameters'] - - stack_params['Parameters'] = [] + if ( + module.params["stack_policy_body"] is not None + and not module.check_mode + and not module.params["create_changeset"] + ): + stack_params["StackPolicyBody"] = module.params["stack_policy_body"] + elif module.params["stack_policy"] is not None and not module.check_mode and not module.params["create_changeset"]: + with open(module.params["stack_policy"], "r") as stack_policy_fh: + stack_params["StackPolicyBody"] = stack_policy_fh.read() + + template_parameters = module.params["template_parameters"] + + stack_params["Parameters"] = [] for k, v in template_parameters.items(): if isinstance(v, dict): # set parameter based on a dict to allow additional CFN Parameter Attributes param = dict(ParameterKey=k) - if 'value' in v: - param['ParameterValue'] = str(v['value']) + if "value" in v: + param["ParameterValue"] = str(v["value"]) - if 'use_previous_value' in v and bool(v['use_previous_value']): - param['UsePreviousValue'] = True - param.pop('ParameterValue', None) + if "use_previous_value" in v and bool(v["use_previous_value"]): + param["UsePreviousValue"] = True + param.pop("ParameterValue", None) - stack_params['Parameters'].append(param) + stack_params["Parameters"].append(param) else: # allow default k/v configuration to set a template parameter - stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - if isinstance(module.params.get('tags'), dict): - stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + if isinstance(module.params.get("tags"), dict): + stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - if module.params.get('role_arn'): - stack_params['RoleARN'] = module.params['role_arn'] + if module.params.get("role_arn"): + stack_params["RoleARN"] = module.params["role_arn"] result = {} # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes retry_decorator = AWSRetry.jittered_backoff( - retries=module.params.get('backoff_retries'), - delay=module.params.get('backoff_delay'), - max_delay=module.params.get('backoff_max_delay') + retries=module.params.get("backoff_retries"), + delay=module.params.get("backoff_delay"), + max_delay=module.params.get("backoff_max_delay"), ) - cfn = module.client('cloudformation', retry_decorator=retry_decorator) + cfn = module.client("cloudformation", retry_decorator=retry_decorator) - stack_info = get_stack_facts(module, cfn, stack_params['StackName']) + stack_info = get_stack_facts(module, cfn, stack_params["StackName"]) if module.check_mode: - if state == 'absent' and stack_info: - module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) - elif state == 'absent' and not stack_info: - module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) - elif state == 'present' and not stack_info: - module.exit_json(changed=True, msg='New stack would be created', meta=[]) + if state == 
"absent" and stack_info: + module.exit_json(changed=True, msg="Stack would be deleted", meta=[]) + elif state == "absent" and not stack_info: + module.exit_json(changed=False, msg="Stack doesn't exist", meta=[]) + elif state == "present" and not stack_info: + module.exit_json(changed=True, msg="New stack would be created", meta=[]) else: module.exit_json(**check_mode_changeset(module, stack_params, cfn)) - if state == 'present': + if state == "present": if not stack_info: - result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) - elif module.params.get('create_changeset'): - result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + result = create_stack(module, stack_params, cfn, module.params.get("events_limit")) + elif module.params.get("create_changeset"): + result = create_changeset(module, stack_params, cfn, module.params.get("events_limit")) else: - if module.params.get('termination_protection') is not None: - update_termination_protection(module, cfn, stack_params['StackName'], - bool(module.params.get('termination_protection'))) - result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + if module.params.get("termination_protection") is not None: + update_termination_protection( + module, cfn, stack_params["StackName"], bool(module.params.get("termination_protection")) + ) + result = update_stack(module, stack_params, cfn, module.params.get("events_limit")) # format the stack output - stack = get_stack_facts(module, cfn, stack_params['StackName']) + stack = get_stack_facts(module, cfn, stack_params["StackName"]) if stack is not None: - if result.get('stack_outputs') is None: + if result.get("stack_outputs") is None: # always define stack_outputs, but it may be empty - result['stack_outputs'] = {} - for output in stack.get('Outputs', []): - result['stack_outputs'][output['OutputKey']] = output['OutputValue'] + result["stack_outputs"] = {} + for output in stack.get("Outputs", []): + result["stack_outputs"][output["OutputKey"]] = output["OutputValue"] stack_resources = [] - reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params['StackName']) - for res in reslist.get('StackResourceSummaries', []): - stack_resources.append({ - "logical_resource_id": res['LogicalResourceId'], - "physical_resource_id": res.get('PhysicalResourceId', ''), - "resource_type": res['ResourceType'], - "last_updated_time": res['LastUpdatedTimestamp'], - "status": res['ResourceStatus'], - "status_reason": res.get('ResourceStatusReason') # can be blank, apparently - }) - result['stack_resources'] = stack_resources - - elif state == 'absent': + reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params["StackName"]) + for res in reslist.get("StackResourceSummaries", []): + stack_resources.append( + { + "logical_resource_id": res["LogicalResourceId"], + "physical_resource_id": res.get("PhysicalResourceId", ""), + "resource_type": res["ResourceType"], + "last_updated_time": res["LastUpdatedTimestamp"], + "status": res["ResourceStatus"], + "status_reason": res.get("ResourceStatusReason"), # can be blank, apparently + } + ) + result["stack_resources"] = stack_resources + + elif state == "absent": # absent state is different because of the way delete_stack works. 
# problem is it doesn't give an error if stack isn't found # so must describe the stack first try: - stack = get_stack_facts(module, cfn, stack_params['StackName']) + stack = get_stack_facts(module, cfn, stack_params["StackName"]) if not stack: - result = {'changed': False, 'output': 'Stack not found.'} + result = {"changed": False, "output": "Stack not found."} else: - if stack_params.get('RoleARN') is None: - cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName']) + if stack_params.get("RoleARN") is None: + cfn.delete_stack(aws_retry=True, StackName=stack_params["StackName"]) else: - cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN']) - result = stack_operation(module, cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'), - stack_params.get('ClientRequestToken', None)) + cfn.delete_stack( + aws_retry=True, StackName=stack_params["StackName"], RoleARN=stack_params["RoleARN"] + ) + result = stack_operation( + module, + cfn, + stack_params["StackName"], + "DELETE", + module.params.get("events_limit"), + stack_params.get("ClientRequestToken", None), + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: module.fail_json_aws(err) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py index 89ba80bf7..697b39f00 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudformation_info version_added: 1.0.0 @@ -52,12 +50,12 @@ options: type: bool default: false extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get information on all stacks @@ -100,9 +98,9 @@ EXAMPLES = ''' stack_name: nonexistent-stack all_facts: true failed_when: cloudformation['nonexistent-stack'] is undefined -''' +""" -RETURN = ''' +RETURN = r""" cloudformation: description: - Dictionary of dictionaries containing info of stack(s).
@@ -287,7 +285,7 @@ cloudformation: 'TagOne': 'ValueOne', 'TagTwo': 'ValueTwo' } -''' +""" import json @@ -298,10 +296,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict class CloudFormationServiceManager: @@ -309,29 +307,32 @@ class CloudFormationServiceManager: def __init__(self, module): self.module = module - self.client = module.client('cloudformation') + self.client = module.client("cloudformation") @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stacks_with_backoff(self, **kwargs): - paginator = self.client.get_paginator('describe_stacks') - return paginator.paginate(**kwargs).build_full_result()['Stacks'] + paginator = self.client.get_paginator("describe_stacks") + return paginator.paginate(**kwargs).build_full_result()["Stacks"] def describe_stacks(self, stack_name=None): try: - kwargs = {'StackName': stack_name} if stack_name else {} + kwargs = {"StackName": stack_name} if stack_name else {} response = self.describe_stacks_with_backoff(**kwargs) if response is not None: return response self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") - except is_boto3_error_message('does not exist'): + except is_boto3_error_message("does not exist"): return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) @AWSRetry.exponential_backoff(retries=5, delay=5) def list_stack_resources_with_backoff(self, stack_name): - paginator = self.client.get_paginator('list_stack_resources') - return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + paginator = self.client.get_paginator("list_stack_resources") + return paginator.paginate(StackName=stack_name).build_full_result()["StackResourceSummaries"] def list_stack_resources(self, stack_name): try: @@ -341,8 +342,8 @@ class CloudFormationServiceManager: @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stack_events_with_backoff(self, stack_name): - paginator = self.client.get_paginator('describe_stack_events') - return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + paginator = self.client.get_paginator("describe_stack_events") + return paginator.paginate(StackName=stack_name).build_full_result()["StackEvents"] def describe_stack_events(self, stack_name): try: @@ -352,12 +353,12 @@ class CloudFormationServiceManager: @AWSRetry.exponential_backoff(retries=5, delay=5) def list_stack_change_sets_with_backoff(self, stack_name): - paginator = self.client.get_paginator('list_change_sets') 
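Each CloudFormationServiceManager method in this hunk follows the same idiom: an AWSRetry.exponential_backoff() decorator around a paginator whose build_full_result() merges every page into a single response dict. The bare pattern outside the class, with a plain boto3 client standing in for module.client():

import boto3

cfn = boto3.client("cloudformation")
# build_full_result() walks all pages and concatenates the list keys,
# so callers see one merged response instead of a page iterator.
full = cfn.get_paginator("describe_stacks").paginate().build_full_result()
for stack in full["Stacks"]:
    print(stack["StackName"], stack["StackStatus"])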
- return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + paginator = self.client.get_paginator("list_change_sets") + return paginator.paginate(StackName=stack_name).build_full_result()["Summaries"] @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stack_change_set_with_backoff(self, **kwargs): - paginator = self.client.get_paginator('describe_change_set') + paginator = self.client.get_paginator("describe_change_set") return paginator.paginate(**kwargs).build_full_result() def describe_stack_change_sets(self, stack_name): @@ -365,9 +366,11 @@ class CloudFormationServiceManager: try: change_sets = self.list_stack_change_sets_with_backoff(stack_name) for item in change_sets: - changes.append(self.describe_stack_change_set_with_backoff( - StackName=stack_name, - ChangeSetName=item['ChangeSetName'])) + changes.append( + self.describe_stack_change_set_with_backoff( + StackName=stack_name, ChangeSetName=item["ChangeSetName"] + ) + ) return changes except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) @@ -379,7 +382,7 @@ class CloudFormationServiceManager: def get_stack_policy(self, stack_name): try: response = self.get_stack_policy_with_backoff(stack_name) - stack_policy = response.get('StackPolicyBody') + stack_policy = response.get("StackPolicyBody") if stack_policy: return json.loads(stack_policy) return dict() @@ -393,13 +396,13 @@ class CloudFormationServiceManager: def get_template(self, stack_name): try: response = self.get_template_with_backoff(stack_name) - return response.get('TemplateBody') + return response.get("TemplateBody") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) def to_dict(items, key, value): - ''' Transforms a list of items to a Key/Value dictionary ''' + """Transforms a list of items to a Key/Value dictionary""" if items: return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) else: @@ -409,53 +412,60 @@ def to_dict(items, key, value): def main(): argument_spec = dict( stack_name=dict(), - all_facts=dict(required=False, default=False, type='bool'), - stack_policy=dict(required=False, default=False, type='bool'), - stack_events=dict(required=False, default=False, type='bool'), - stack_resources=dict(required=False, default=False, type='bool'), - stack_template=dict(required=False, default=False, type='bool'), - stack_change_sets=dict(required=False, default=False, type='bool'), + all_facts=dict(required=False, default=False, type="bool"), + stack_policy=dict(required=False, default=False, type="bool"), + stack_events=dict(required=False, default=False, type="bool"), + stack_resources=dict(required=False, default=False, type="bool"), + stack_template=dict(required=False, default=False, type="bool"), + stack_change_sets=dict(required=False, default=False, type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) service_mgr = CloudFormationServiceManager(module) - result = {'cloudformation': {}} + result = {"cloudformation": {}} - for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): - facts = {'stack_description': stack_description} - stack_name = stack_description.get('StackName') + for stack_description in service_mgr.describe_stacks(module.params.get("stack_name")): + facts = 
{"stack_description": stack_description} + stack_name = stack_description.get("StackName") # Create stack output and stack parameter dictionaries - if facts['stack_description']: - facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') - facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), - 'ParameterKey', 'ParameterValue') - facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + if facts["stack_description"]: + facts["stack_outputs"] = to_dict(facts["stack_description"].get("Outputs"), "OutputKey", "OutputValue") + facts["stack_parameters"] = to_dict( + facts["stack_description"].get("Parameters"), "ParameterKey", "ParameterValue" + ) + facts["stack_tags"] = boto3_tag_list_to_ansible_dict(facts["stack_description"].get("Tags")) # Create optional stack outputs - all_facts = module.params.get('all_facts') - if all_facts or module.params.get('stack_resources'): - facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) - facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), - 'LogicalResourceId', 'PhysicalResourceId') - if all_facts or module.params.get('stack_template'): - facts['stack_template'] = service_mgr.get_template(stack_name) - if all_facts or module.params.get('stack_policy'): - facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) - if all_facts or module.params.get('stack_events'): - facts['stack_events'] = service_mgr.describe_stack_events(stack_name) - if all_facts or module.params.get('stack_change_sets'): - facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) - - result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', - 'stack_parameters', - 'stack_policy', - 'stack_resources', - 'stack_tags', - 'stack_template')) + all_facts = module.params.get("all_facts") + if all_facts or module.params.get("stack_resources"): + facts["stack_resource_list"] = service_mgr.list_stack_resources(stack_name) + facts["stack_resources"] = to_dict( + facts.get("stack_resource_list"), "LogicalResourceId", "PhysicalResourceId" + ) + if all_facts or module.params.get("stack_template"): + facts["stack_template"] = service_mgr.get_template(stack_name) + if all_facts or module.params.get("stack_policy"): + facts["stack_policy"] = service_mgr.get_stack_policy(stack_name) + if all_facts or module.params.get("stack_events"): + facts["stack_events"] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get("stack_change_sets"): + facts["stack_change_sets"] = service_mgr.describe_stack_change_sets(stack_name) + + result["cloudformation"][stack_name] = camel_dict_to_snake_dict( + facts, + ignore_list=( + "stack_outputs", + "stack_parameters", + "stack_policy", + "stack_resources", + "stack_tags", + "stack_template", + ), + ) module.exit_json(changed=False, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py index af48e7ea8..597d43f1b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudtrail version_added: 5.0.0 @@ -94,14 +92,13 @@ notes: - The I(purge_tags) option was added in release 4.0.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: create single region cloudtrail amazon.aws.cloudtrail: state: present @@ -150,9 +147,9 @@ EXAMPLES = ''' amazon.aws.cloudtrail: state: absent name: default -''' +""" -RETURN = ''' +RETURN = r""" exists: description: whether the resource exists returned: always @@ -244,16 +241,17 @@ trail: returned: success type: dict sample: {'environment': 'dev', 'Name': 'default'} -''' +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -274,7 +272,7 @@ def get_kms_key_aliases(module, client, keyId): # in case user doesn't have kms:ListAliases permissions return [] - return key_resp['Aliases'] + return key_resp["Aliases"] def create_trail(module, client, ct_params): @@ -344,7 +342,7 @@ def get_tag_list(keys, tags): """ tag_list = [] for k in keys: - tag_list.append({'Key': k, 'Value': tags[k]}) + tag_list.append({"Key": k, "Value": tags[k]}) return tag_list @@ -358,13 +356,13 @@ def set_logging(module, client, name, action): name : The name or ARN of the CloudTrail to operate on action : start or stop """ - if action == 'start': + if action == "start": try: client.start_logging(Name=name) return client.get_trail_status(Name=name) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to start logging") - elif action == 'stop': + elif action == "stop": try: client.stop_logging(Name=name) return client.get_trail_status(Name=name) @@ -389,18 +387,27 @@ def get_trail_facts(module, client, name): module.fail_json_aws(err, msg="Failed to describe Trail") # Now check to see if our trail exists and get status and tags - if len(trail_resp['trailList']): - trail = trail_resp['trailList'][0] + if len(trail_resp["trailList"]): + trail = trail_resp["trailList"][0] try: - status_resp = client.get_trail_status(Name=trail['Name']) - tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']]) + status_resp = client.get_trail_status(Name=trail["Name"]) + tags_list = client.list_tags(ResourceIdList=[trail["TrailARN"]]) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to describe Trail") - trail['IsLogging'] = status_resp['IsLogging'] - trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList']) + trail["IsLogging"] = status_resp["IsLogging"] + trail["tags"] = boto3_tag_list_to_ansible_dict(tags_list["ResourceTagList"][0]["TagsList"]) # Check for non-existent values and populate with 
None - optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId']) + optional_vals = set( + [ + "S3KeyPrefix", + "SnsTopicName", + "SnsTopicARN", + "CloudWatchLogsLogGroupArn", + "CloudWatchLogsRoleArn", + "KmsKeyId", + ] + ) for v in optional_vals - set(trail.keys()): trail[v] = None return trail @@ -440,160 +447,163 @@ def update_trail(module, client, ct_params): def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), - name=dict(default='default'), - enable_logging=dict(default=True, type='bool'), + state=dict(default="present", choices=["present", "absent", "enabled", "disabled"]), + name=dict(default="default"), + enable_logging=dict(default=True, type="bool"), s3_bucket_name=dict(), s3_key_prefix=dict(no_log=False), sns_topic_name=dict(), - is_multi_region_trail=dict(default=False, type='bool'), - enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']), - include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']), + is_multi_region_trail=dict(default=False, type="bool"), + enable_log_file_validation=dict(type="bool", aliases=["log_file_validation_enabled"]), + include_global_events=dict(default=True, type="bool", aliases=["include_global_service_events"]), cloudwatch_logs_role_arn=dict(), cloudwatch_logs_log_group_arn=dict(), kms_key_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])] - required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')] + required_if = [("state", "present", ["s3_bucket_name"]), ("state", "enabled", ["s3_bucket_name"])] + required_together = [("cloudwatch_logs_role_arn", "cloudwatch_logs_log_group_arn")] - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=required_together, + required_if=required_if, + ) # collect parameters - if module.params['state'] in ('present', 'enabled'): - state = 'present' - elif module.params['state'] in ('absent', 'disabled'): - state = 'absent' - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - enable_logging = module.params['enable_logging'] + if module.params["state"] in ("present", "enabled"): + state = "present" + elif module.params["state"] in ("absent", "disabled"): + state = "absent" + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + enable_logging = module.params["enable_logging"] ct_params = dict( - Name=module.params['name'], - S3BucketName=module.params['s3_bucket_name'], - IncludeGlobalServiceEvents=module.params['include_global_events'], - IsMultiRegionTrail=module.params['is_multi_region_trail'], + Name=module.params["name"], + S3BucketName=module.params["s3_bucket_name"], + IncludeGlobalServiceEvents=module.params["include_global_events"], + IsMultiRegionTrail=module.params["is_multi_region_trail"], ) - if module.params['s3_key_prefix']: - ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/') + if module.params["s3_key_prefix"]: + ct_params["S3KeyPrefix"] = 
module.params["s3_key_prefix"].rstrip("/") - if module.params['sns_topic_name']: - ct_params['SnsTopicName'] = module.params['sns_topic_name'] + if module.params["sns_topic_name"]: + ct_params["SnsTopicName"] = module.params["sns_topic_name"] - if module.params['cloudwatch_logs_role_arn']: - ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn'] + if module.params["cloudwatch_logs_role_arn"]: + ct_params["CloudWatchLogsRoleArn"] = module.params["cloudwatch_logs_role_arn"] - if module.params['cloudwatch_logs_log_group_arn']: - ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn'] + if module.params["cloudwatch_logs_log_group_arn"]: + ct_params["CloudWatchLogsLogGroupArn"] = module.params["cloudwatch_logs_log_group_arn"] - if module.params['enable_log_file_validation'] is not None: - ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation'] + if module.params["enable_log_file_validation"] is not None: + ct_params["EnableLogFileValidation"] = module.params["enable_log_file_validation"] if module.params["kms_key_id"] is not None: ct_params["KmsKeyId"] = module.params["kms_key_id"] - client = module.client('cloudtrail') + client = module.client("cloudtrail") region = module.region - results = dict( - changed=False, - exists=False - ) + results = dict(changed=False, exists=False) # Get existing trail facts - trail = get_trail_facts(module, client, ct_params['Name']) + trail = get_trail_facts(module, client, ct_params["Name"]) # If the trail exists set the result exists variable if trail is not None: - results['exists'] = True - initial_kms_key_id = trail.get('KmsKeyId') + results["exists"] = True + initial_kms_key_id = trail.get("KmsKeyId") - if state == 'absent' and results['exists']: + if state == "absent" and results["exists"]: # If Trail exists go ahead and delete - results['changed'] = True - results['exists'] = False - results['trail'] = dict() + results["changed"] = True + results["exists"] = False + results["trail"] = dict() if not module.check_mode: - delete_trail(module, client, trail['TrailARN']) + delete_trail(module, client, trail["TrailARN"]) - elif state == 'present' and results['exists']: + elif state == "present" and results["exists"]: # If Trail exists see if we need to update it do_update = False for key in ct_params: tkey = str(key) # boto3 has inconsistent parameter naming so we handle it here - if key == 'EnableLogFileValidation': - tkey = 'LogFileValidationEnabled' + if key == "EnableLogFileValidation": + tkey = "LogFileValidationEnabled" # We need to make an empty string equal None - if ct_params.get(key) == '': + if ct_params.get(key) == "": val = None else: val = ct_params.get(key) if val != trail.get(tkey): do_update = True - if tkey != 'KmsKeyId': + if tkey != "KmsKeyId": # We'll check if the KmsKeyId casues changes later since # user could've provided a key alias, alias arn, or key id # and trail['KmsKeyId'] is always a key arn - results['changed'] = True + results["changed"] = True # If we are in check mode copy the changed values to the trail facts in result output to show what would change. 
            if module.check_mode:
                trail.update({tkey: ct_params.get(key)})
        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
-            trail = get_trail_facts(module, client, ct_params['Name'])
+            trail = get_trail_facts(module, client, ct_params["Name"])
        # Determine if KmsKeyId changed
        if not module.check_mode:
-            if initial_kms_key_id != trail.get('KmsKeyId'):
-                results['changed'] = True
+            if initial_kms_key_id != trail.get("KmsKeyId"):
+                results["changed"] = True
        else:
-            new_key = ct_params.get('KmsKeyId')
+            new_key = ct_params.get("KmsKeyId")
            if initial_kms_key_id != new_key:
                # Assume changed for a moment
-                results['changed'] = True
+                results["changed"] = True
                # However, new_key could be a key id, alias arn, or alias name
                # that maps back to the key arn in initial_kms_key_id. So check
                # all aliases for a match.
-                initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
+                initial_aliases = get_kms_key_aliases(module, module.client("kms"), initial_kms_key_id)
                for a in initial_aliases:
-                    if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key:
-                        results['changed'] = False
+                    if a["AliasName"] == new_key or a["AliasArn"] == new_key or a["TargetKeyId"] == new_key:
+                        results["changed"] = False
        # Check if we need to start/stop logging
-        if enable_logging and not trail['IsLogging']:
-            results['changed'] = True
-            trail['IsLogging'] = True
+        if enable_logging and not trail["IsLogging"]:
+            results["changed"] = True
+            trail["IsLogging"] = True
            if not module.check_mode:
-                set_logging(module, client, name=ct_params['Name'], action='start')
-        if not enable_logging and trail['IsLogging']:
-            results['changed'] = True
-            trail['IsLogging'] = False
+                set_logging(module, client, name=ct_params["Name"], action="start")
+        if not enable_logging and trail["IsLogging"]:
+            results["changed"] = True
+            trail["IsLogging"] = False
            if not module.check_mode:
-                set_logging(module, client, name=ct_params['Name'], action='stop')
+                set_logging(module, client, name=ct_params["Name"], action="stop")
        # Check if we need to update tags on resource
-        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'],
-                                 purge_tags=purge_tags)
+        tags_changed = tag_trail(
+            module, client, tags=tags, trail_arn=trail["TrailARN"], curr_tags=trail["tags"], purge_tags=purge_tags
+        )
        if tags_changed:
            updated_tags = dict()
            if not purge_tags:
-                updated_tags = trail['tags']
+                updated_tags = trail["tags"]
            updated_tags.update(tags)
-            results['changed'] = True
-            trail['tags'] = updated_tags
+            results["changed"] = True
+            trail["tags"] = updated_tags
        # Populate trail facts in output
-        results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])
+        results["trail"] = camel_dict_to_snake_dict(trail, ignore_list=["tags"])
-    elif state == 'present' and not results['exists']:
+    elif state == "present" and not results["exists"]:
        # Trail doesn't exist just go create it
-        results['changed'] = True
-        results['exists'] = True
+        results["changed"] = True
+        results["exists"] = True
        if not module.check_mode:
            if tags:
                ct_params["TagsList"] = ansible_dict_to_boto3_tag_list(tags)
@@ -601,42 +611,42 @@ def main():
            created_trail = create_trail(module, client, ct_params)
            # Get the trail status
            try:
-                status_resp = client.get_trail_status(Name=created_trail['Name'])
+                status_resp = client.get_trail_status(Name=created_trail["Name"])
            except (BotoCoreError, ClientError) as err:
                module.fail_json_aws(err, msg="Failed to fetch Trail status")
            # Set the
logging state for the trail to desired value - if enable_logging and not status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='start') - if not enable_logging and status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='stop') + if enable_logging and not status_resp["IsLogging"]: + set_logging(module, client, name=ct_params["Name"], action="start") + if not enable_logging and status_resp["IsLogging"]: + set_logging(module, client, name=ct_params["Name"], action="stop") # Get facts for newly created Trail - trail = get_trail_facts(module, client, ct_params['Name']) + trail = get_trail_facts(module, client, ct_params["Name"]) # If we are in check mode create a fake return structure for the newly minted trail if module.check_mode: - acct_id = '123456789012' + acct_id = "123456789012" try: - sts_client = module.client('sts') - acct_id = sts_client.get_caller_identity()['Account'] + sts_client = module.client("sts") + acct_id = sts_client.get_caller_identity()["Account"] except (BotoCoreError, ClientError): pass trail = dict() trail.update(ct_params) - if 'EnableLogFileValidation' not in ct_params: - ct_params['EnableLogFileValidation'] = False - trail['EnableLogFileValidation'] = ct_params['EnableLogFileValidation'] - trail.pop('EnableLogFileValidation') - fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name'] - trail['HasCustomEventSelectors'] = False - trail['HomeRegion'] = region - trail['TrailARN'] = fake_arn - trail['IsLogging'] = enable_logging - trail['tags'] = tags + if "EnableLogFileValidation" not in ct_params: + ct_params["EnableLogFileValidation"] = False + trail["EnableLogFileValidation"] = ct_params["EnableLogFileValidation"] + trail.pop("EnableLogFileValidation") + fake_arn = "arn:aws:cloudtrail:" + region + ":" + acct_id + ":trail/" + ct_params["Name"] + trail["HasCustomEventSelectors"] = False + trail["HomeRegion"] = region + trail["TrailARN"] = fake_arn + trail["IsLogging"] = enable_logging + trail["tags"] = tags # Populate trail facts in output - results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) + results["trail"] = camel_dict_to_snake_dict(trail, ignore_list=["tags"]) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py index 0429bb7f0..d1e51baf8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudtrail_info version_added: 5.0.0 @@ -27,12 +25,12 @@ options: default: true description: Specifies whether to include shadow trails in the response. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
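# An illustrative usage snippet (not one of the upstream examples), assuming
# at least one trail exists and using field names as returned in the RETURN
# block further down: register the output and read trail_list back.
- amazon.aws.cloudtrail_info:
  register: all_trails

- ansible.builtin.debug:
    msg: "{{ all_trails.trail_list | map(attribute='name') | list }}"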
# Gather information about all trails @@ -42,10 +40,9 @@ EXAMPLES = ''' - amazon.aws.cloudtrail_info: trail_names: - arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail +""" -''' - -RETURN = ''' +RETURN = r""" trail_list: description: List of trail objects. Each element consists of a dict with all the information related to that cloudtrail. type: list @@ -151,8 +148,7 @@ trail_list: type: dict returned: always sample: "{ 'my_tag_key': 'my_tag_value' }" - -''' +""" try: import botocore @@ -161,15 +157,15 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def get_trails(connection, module): all_trails = [] try: - result = connection.get_paginator('list_trails') + result = connection.get_paginator("list_trails") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get the trails.") for trail in result.paginate(): @@ -188,12 +184,14 @@ def get_trail_detail(connection, module): if not trail_name_list: trail_name_list = get_trails(connection, module) try: - result = connection.describe_trails(trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True) + result = connection.describe_trails( + trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get the trails.") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_cloud_trail = [] - for cloud_trail in result['trailList']: + for cloud_trail in result["trailList"]: try: status_dict = connection.get_trail_status(Name=cloud_trail["TrailARN"], aws_retry=True) cloud_trail.update(status_dict) @@ -204,35 +202,35 @@ def get_trail_detail(connection, module): for tag_dict in tag_list["ResourceTagList"]: cloud_trail.update(tag_dict) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.warn("Failed to get the trail tags - {0}".format(e)) + module.warn(f"Failed to get the trail tags - {e}") snaked_cloud_trail.append(camel_dict_to_snake_dict(cloud_trail)) # Turn the boto3 result in to ansible friendly tag dictionary for tr in snaked_cloud_trail: - if 'tags_list' in tr: - tr['tags'] = boto3_tag_list_to_ansible_dict(tr['tags_list'], 'key', 'value') - del (tr['tags_list']) - if 'response_metadata' in tr: - del (tr['response_metadata']) - output['trail_list'] = snaked_cloud_trail + if "tags_list" in tr: + tr["tags"] = boto3_tag_list_to_ansible_dict(tr["tags_list"], "key", "value") + del tr["tags_list"] + if "response_metadata" in tr: + del tr["response_metadata"] + output["trail_list"] = snaked_cloud_trail return output def main(): argument_spec = dict( - trail_names=dict(type='list', elements='str', default=[]), - include_shadow_trails=dict(type='bool', default=True), + trail_names=dict(type="list", elements="str", default=[]), + 
include_shadow_trails=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('cloudtrail', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("cloudtrail", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") result = get_trail_detail(connection, module) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py index af66b39e0..e3a174913 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py @@ -1,24 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" module: cloudwatch_metric_alarm short_description: "Create/update or delete AWS CloudWatch 'metric alarms'" version_added: 5.0.0 @@ -57,6 +43,7 @@ options: required: false version_added: "5.5.0" elements: dict + default: [] suboptions: id: description: @@ -216,7 +203,6 @@ options: - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension) required: false type: dict - default: {} alarm_actions: description: - A list of the names action(s) taken when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s). 
@@ -250,86 +236,89 @@ options: - 'missing' default: 'missing' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' - -EXAMPLES = r''' - - name: create alarm - amazon.aws.cloudwatch_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" - statistic: Average - comparison: "LessThanOrEqualToThreshold" - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: "Percent" - description: "This will alarm when a instance's CPU usage average is lower than 5% for 15 minutes" - dimensions: {'InstanceId':'i-XXX'} - alarm_actions: ["action1","action2"] - - - name: create alarm with metrics - amazon.aws.cloudwatch_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metrics: - - id: 'CPU' - metric_stat: - metric: - dimensions: - name: "InstanceId" - value: "i-xx" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" - period: "300" - stat: "Average" - unit: "Percent" - return_data: False - alarm_actions: ["action1","action2"] - - - name: Create an alarm to recover a failed instance - amazon.aws.cloudwatch_metric_alarm: - state: present - region: us-west-1 - name: "recover-instance" - metric: "StatusCheckFailed_System" - namespace: "AWS/EC2" - statistic: "Minimum" - comparison: "GreaterThanOrEqualToThreshold" - threshold: 1.0 - period: 60 - evaluation_periods: 2 - unit: "Count" - description: "This will recover an instance when it fails" - dimensions: {"InstanceId":'i-XXX'} - alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] -''' +""" + +RETURN = r""" # """ + +EXAMPLES = r""" +- name: create alarm + amazon.aws.cloudwatch_metric_alarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + statistic: Average + comparison: "LessThanOrEqualToThreshold" + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: "Percent" + description: "This will alarm when a instance's CPU usage average is lower than 5% for 15 minutes" + dimensions: {'InstanceId': 'i-XXX'} + alarm_actions: ["action1", "action2"] + +- name: create alarm with metrics + amazon.aws.cloudwatch_metric_alarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metrics: + - id: 'CPU' + metric_stat: + metric: + dimensions: + name: "InstanceId" + value: "i-xx" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + period: "300" + stat: "Average" + unit: "Percent" + return_data: false + alarm_actions: ["action1", "action2"] + +- name: Create an alarm to recover a failed instance + amazon.aws.cloudwatch_metric_alarm: + state: present + region: us-west-1 + name: "recover-instance" + metric: "StatusCheckFailed_System" + namespace: "AWS/EC2" + statistic: "Minimum" + comparison: "GreaterThanOrEqualToThreshold" + threshold: 1.0 + period: 60 + evaluation_periods: 2 + unit: "Count" + description: "This will recover an instance when it fails" + dimensions: {"InstanceId":'i-XXX'} + alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] +""" try: from botocore.exceptions import ClientError except ImportError: pass # protected by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import 
AnsibleAWSModule + def create_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - if params.get('Dimensions'): - if not isinstance(params['Dimensions'], list): + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) + if params.get("Dimensions"): + if not isinstance(params["Dimensions"], list): fixed_dimensions = [] - for key, value in params['Dimensions'].items(): - fixed_dimensions.append({'Name': key, 'Value': value}) - params['Dimensions'] = fixed_dimensions + for key, value in params["Dimensions"].items(): + fixed_dimensions.append({"Name": key, "Value": value}) + params["Dimensions"] = fixed_dimensions - if not alarms['MetricAlarms']: + if not alarms["MetricAlarms"]: try: if not module.check_mode: connection.put_metric_alarm(**params) @@ -339,17 +328,24 @@ def create_metric_alarm(connection, module, params): else: changed = False - alarm = alarms['MetricAlarms'][0] + alarm = alarms["MetricAlarms"][0] # Workaround for alarms created before TreatMissingData was introduced - if 'TreatMissingData' not in alarm.keys(): - alarm['TreatMissingData'] = 'missing' + if "TreatMissingData" not in alarm.keys(): + alarm["TreatMissingData"] = "missing" # Exclude certain props from change detection - for key in ['ActionsEnabled', 'StateValue', 'StateReason', - 'StateReasonData', 'StateUpdatedTimestamp', - 'StateTransitionedTimestamp', - 'AlarmArn', 'AlarmConfigurationUpdatedTimestamp', 'Metrics']: + for key in [ + "ActionsEnabled", + "StateValue", + "StateReason", + "StateReasonData", + "StateUpdatedTimestamp", + "StateTransitionedTimestamp", + "AlarmArn", + "AlarmConfigurationUpdatedTimestamp", + "Metrics", + ]: alarm.pop(key, None) if alarm != params: changed = True @@ -363,53 +359,55 @@ def create_metric_alarm(connection, module, params): module.fail_json_aws(e) try: - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) except ClientError as e: module.fail_json_aws(e) result = {} - if alarms['MetricAlarms']: - if alarms['MetricAlarms'][0].get('Metrics'): + if alarms["MetricAlarms"]: + if alarms["MetricAlarms"][0].get("Metrics"): metric_list = [] - for metric_element in alarms['MetricAlarms'][0]['Metrics']: + for metric_element in alarms["MetricAlarms"][0]["Metrics"]: metric_list.append(camel_dict_to_snake_dict(metric_element)) - alarms['MetricAlarms'][0]['Metrics'] = metric_list - result = alarms['MetricAlarms'][0] - - module.exit_json(changed=changed, - name=result.get('AlarmName'), - actions_enabled=result.get('ActionsEnabled'), - alarm_actions=result.get('AlarmActions'), - alarm_arn=result.get('AlarmArn'), - comparison=result.get('ComparisonOperator'), - description=result.get('AlarmDescription'), - dimensions=result.get('Dimensions'), - evaluation_periods=result.get('EvaluationPeriods'), - insufficient_data_actions=result.get('InsufficientDataActions'), - last_updated=result.get('AlarmConfigurationUpdatedTimestamp'), - metric=result.get('MetricName'), - metric_name=result.get('MetricName'), - metrics=result.get('Metrics'), - namespace=result.get('Namespace'), - ok_actions=result.get('OKActions'), - period=result.get('Period'), - state_reason=result.get('StateReason'), - state_value=result.get('StateValue'), - statistic=result.get('Statistic'), - threshold=result.get('Threshold'), - treat_missing_data=result.get('TreatMissingData'), - unit=result.get('Unit')) + alarms["MetricAlarms"][0]["Metrics"] = metric_list + result = 
alarms["MetricAlarms"][0] + + module.exit_json( + changed=changed, + name=result.get("AlarmName"), + actions_enabled=result.get("ActionsEnabled"), + alarm_actions=result.get("AlarmActions"), + alarm_arn=result.get("AlarmArn"), + comparison=result.get("ComparisonOperator"), + description=result.get("AlarmDescription"), + dimensions=result.get("Dimensions"), + evaluation_periods=result.get("EvaluationPeriods"), + insufficient_data_actions=result.get("InsufficientDataActions"), + last_updated=result.get("AlarmConfigurationUpdatedTimestamp"), + metric=result.get("MetricName"), + metric_name=result.get("MetricName"), + metrics=result.get("Metrics"), + namespace=result.get("Namespace"), + ok_actions=result.get("OKActions"), + period=result.get("Period"), + state_reason=result.get("StateReason"), + state_value=result.get("StateValue"), + statistic=result.get("Statistic"), + threshold=result.get("Threshold"), + treat_missing_data=result.get("TreatMissingData"), + unit=result.get("Unit"), + ) def delete_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) - if alarms['MetricAlarms']: + if alarms["MetricAlarms"]: try: if not module.check_mode: - connection.delete_alarms(AlarmNames=[params['AlarmName']]) + connection.delete_alarms(AlarmNames=[params["AlarmName"]]) module.exit_json(changed=True) - except (ClientError) as e: + except ClientError as e: module.fail_json_aws(e) else: module.exit_json(changed=False) @@ -417,40 +415,76 @@ def delete_metric_alarm(connection, module, params): def main(): argument_spec = dict( - name=dict(required=True, type='str'), - metric_name=dict(type='str', aliases=['metric']), - namespace=dict(type='str'), - statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), - comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', - 'GreaterThanOrEqualToThreshold']), - threshold=dict(type='float'), - period=dict(type='int'), - unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', - 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', - 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', - 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', - 'Terabits/Second', 'Count/Second', 'None']), - evaluation_periods=dict(type='int'), - extended_statistic=dict(type='str'), - description=dict(type='str'), - dimensions=dict(type='dict'), - alarm_actions=dict(type='list', default=[], elements='str'), - insufficient_data_actions=dict(type='list', default=[], elements='str'), - ok_actions=dict(type='list', default=[], elements='str'), - treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), - state=dict(default='present', choices=['present', 'absent']), - metrics=dict(type='list', elements='dict', default=[]), + name=dict(required=True, type="str"), + metric_name=dict(type="str", aliases=["metric"]), + namespace=dict(type="str"), + statistic=dict(type="str", choices=["SampleCount", "Average", "Sum", "Minimum", "Maximum"]), + comparison=dict( + type="str", + choices=[ + "LessThanOrEqualToThreshold", + "LessThanThreshold", + "GreaterThanThreshold", + "GreaterThanOrEqualToThreshold", + ], + ), + threshold=dict(type="float"), + period=dict(type="int"), + 
unit=dict( + type="str", + choices=[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None", + ], + ), + evaluation_periods=dict(type="int"), + extended_statistic=dict(type="str"), + description=dict(type="str"), + dimensions=dict(type="dict"), + alarm_actions=dict(type="list", default=[], elements="str"), + insufficient_data_actions=dict(type="list", default=[], elements="str"), + ok_actions=dict(type="list", default=[], elements="str"), + treat_missing_data=dict( + type="str", choices=["breaching", "notBreaching", "ignore", "missing"], default="missing" + ), + state=dict(default="present", choices=["present", "absent"]), + metrics=dict(type="list", elements="dict", default=[]), ) mutually_exclusive = [ - ['metric_name', 'metrics'], - ['dimensions', 'metrics'], - ['period', 'metrics'], - ['namespace', 'metrics'], - ['statistic', 'metrics'], - ['extended_statistic', 'metrics'], - ['unit', 'metrics'], - ['statistic', 'extended_statistic'], + ["metric_name", "metrics"], + ["dimensions", "metrics"], + ["period", "metrics"], + ["namespace", "metrics"], + ["statistic", "metrics"], + ["extended_statistic", "metrics"], + ["unit", "metrics"], + ["statistic", "extended_statistic"], ] module = AnsibleAWSModule( @@ -459,41 +493,41 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') + state = module.params.get("state") params = dict() - params['AlarmName'] = module.params.get('name') - params['MetricName'] = module.params.get('metric_name') - params['Namespace'] = module.params.get('namespace') - params['Statistic'] = module.params.get('statistic') - params['ComparisonOperator'] = module.params.get('comparison') - params['Threshold'] = module.params.get('threshold') - params['Period'] = module.params.get('period') - params['EvaluationPeriods'] = module.params.get('evaluation_periods') - if module.params.get('unit'): - params['Unit'] = module.params.get('unit') - params['AlarmDescription'] = module.params.get('description') - params['Dimensions'] = module.params.get('dimensions') - params['AlarmActions'] = module.params.get('alarm_actions', []) - params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', []) - params['OKActions'] = module.params.get('ok_actions', []) - params['TreatMissingData'] = module.params.get('treat_missing_data') - if module.params.get('metrics'): - params['Metrics'] = snake_dict_to_camel_dict(module.params['metrics'], capitalize_first=True) - if module.params.get('extended_statistic'): - params['ExtendedStatistic'] = module.params.get('extended_statistic') + params["AlarmName"] = module.params.get("name") + params["MetricName"] = module.params.get("metric_name") + params["Namespace"] = module.params.get("namespace") + params["Statistic"] = module.params.get("statistic") + params["ComparisonOperator"] = module.params.get("comparison") + params["Threshold"] = module.params.get("threshold") + params["Period"] = module.params.get("period") + params["EvaluationPeriods"] = module.params.get("evaluation_periods") + if module.params.get("unit"): + params["Unit"] = module.params.get("unit") + params["AlarmDescription"] = module.params.get("description") 
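# An illustrative sketch (not module code) of the pattern this block builds
# toward: every possible PutMetricAlarm argument is collected first, then a
# loop a few lines below deletes the keys whose value is None, so boto3 only
# receives parameters the user actually set.
def _scrub_none_sketch(params):
    return {k: v for k, v in params.items() if v is not None}

assert _scrub_none_sketch({"AlarmName": "cpu-low", "Unit": None}) == {"AlarmName": "cpu-low"}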
+ params["Dimensions"] = module.params.get("dimensions") + params["AlarmActions"] = module.params.get("alarm_actions", []) + params["InsufficientDataActions"] = module.params.get("insufficient_data_actions", []) + params["OKActions"] = module.params.get("ok_actions", []) + params["TreatMissingData"] = module.params.get("treat_missing_data") + if module.params.get("metrics"): + params["Metrics"] = snake_dict_to_camel_dict(module.params["metrics"], capitalize_first=True) + if module.params.get("extended_statistic"): + params["ExtendedStatistic"] = module.params.get("extended_statistic") for key, value in list(params.items()): if value is None: del params[key] - connection = module.client('cloudwatch') + connection = module.client("cloudwatch") - if state == 'present': + if state == "present": create_metric_alarm(connection, module, params) - elif state == 'absent': + elif state == "absent": delete_metric_alarm(connection, module, params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py index 24678b054..1e5287dcd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatch_metric_alarm_info version_added: 5.0.0 @@ -59,35 +57,33 @@ options: type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: describe the metric alarm based on alarm names amazon.aws.cloudwatch_metric_alarm_info: alarm_names: - - my-test-alarm-1 - - my-test-alarm-2 + - my-test-alarm-1 + - my-test-alarm-2 - name: describe the metric alarm based alarm names and state value amazon.aws.cloudwatch_metric_alarm_info: alarm_names: - - my-test-alarm-1 - - my-test-alarm-2 + - my-test-alarm-1 + - my-test-alarm-2 state_value: OK - name: describe the metric alarm based alarm names prefix amazon.aws.cloudwatch_metric_alarm_info: alarm_name_prefix: my-test- +""" -''' - -RETURN = ''' +RETURN = r""" metric_alarms: description: The gathered information about specified metric alarms. returned: when success @@ -223,8 +219,7 @@ metric_alarms: description: This is the ID of the ANOMALY_DETECTION_BAND function used as the threshold for the alarm. 
returned: always type: str - -''' +""" try: @@ -232,92 +227,88 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + @AWSRetry.jittered_backoff(retries=10) def _describe_alarms(connection, **params): - paginator = connection.get_paginator('describe_alarms') + paginator = connection.get_paginator("describe_alarms") return paginator.paginate(**params).build_full_result() def describe_metric_alarms_info(connection, module): - params = build_params(module) - alarm_type_to_return = module.params.get('alarm_type') + alarm_type_to_return = module.params.get("alarm_type") try: describe_metric_alarms_info_response = _describe_alarms(connection, **params) # describe_metric_alarms_info_response = describe_metric_alarms_info_response[alarm_type_to_return] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe cloudwatch metric alarm') + module.fail_json_aws(e, msg="Failed to describe cloudwatch metric alarm") result = [] - if alarm_type_to_return == 'CompositeAlarm': - for response_list_item in describe_metric_alarms_info_response['CompositeAlarms']: + if alarm_type_to_return == "CompositeAlarm": + for response_list_item in describe_metric_alarms_info_response["CompositeAlarms"]: result.append(camel_dict_to_snake_dict(response_list_item)) module.exit_json(composite_alarms=result) - for response_list_item in describe_metric_alarms_info_response['MetricAlarms']: + for response_list_item in describe_metric_alarms_info_response["MetricAlarms"]: result.append(camel_dict_to_snake_dict(response_list_item)) module.exit_json(metric_alarms=result) def build_params(module): - params = {} - if module.params.get('alarm_names'): - params['AlarmNames'] = module.params.get('alarm_names') + if module.params.get("alarm_names"): + params["AlarmNames"] = module.params.get("alarm_names") - if module.params.get('alarm_name_prefix'): - params['AlarmNamePrefix'] = module.params.get('alarm_name_prefix') + if module.params.get("alarm_name_prefix"): + params["AlarmNamePrefix"] = module.params.get("alarm_name_prefix") - if module.params.get('children_of_alarm_name'): - params['ChildrenOfAlarmName'] = module.params.get('children_of_alarm_name') + if module.params.get("children_of_alarm_name"): + params["ChildrenOfAlarmName"] = module.params.get("children_of_alarm_name") - if module.params.get('parents_of_alarm_name'): - params['ParentsOfAlarmName'] = module.params.get('parents_of_alarm_name') + if module.params.get("parents_of_alarm_name"): + params["ParentsOfAlarmName"] = module.params.get("parents_of_alarm_name") - if module.params.get('state_value'): - params['StateValue'] = module.params.get('state_value') + if module.params.get("state_value"): + params["StateValue"] = module.params.get("state_value") - if module.params.get('action_prefix'): - params['ActionPrefix'] = module.params.get('action_prefix') + if module.params.get("action_prefix"): + params["ActionPrefix"] = module.params.get("action_prefix") return params def main(): - argument_spec = dict( - alarm_names=dict(type='list', elements='str', required=False), - 
alarm_name_prefix=dict(type='str', required=False), - alarm_type=dict(type='str', choices=['CompositeAlarm', 'MetricAlarm'], default='MetricAlarm', required=False), - children_of_alarm_name=dict(type='str', required=False), - parents_of_alarm_name=dict(type='str', required=False), - state_value=dict(type='str', choices=['OK', 'ALARM', 'INSUFFICIENT_DATA'], required=False), - action_prefix=dict(type='str', required=False), + alarm_names=dict(type="list", elements="str", required=False), + alarm_name_prefix=dict(type="str", required=False), + alarm_type=dict(type="str", choices=["CompositeAlarm", "MetricAlarm"], default="MetricAlarm", required=False), + children_of_alarm_name=dict(type="str", required=False), + parents_of_alarm_name=dict(type="str", required=False), + state_value=dict(type="str", choices=["OK", "ALARM", "INSUFFICIENT_DATA"], required=False), + action_prefix=dict(type="str", required=False), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[['alarm_names', 'alarm_name_prefix']], - supports_check_mode=True + argument_spec=argument_spec, mutually_exclusive=[["alarm_names", "alarm_name_prefix"]], supports_check_mode=True ) try: - connection = module.client('cloudwatch', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("cloudwatch", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") describe_metric_alarms_info(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py index 3368ba69a..e8565546d 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: cloudwatchevent_rule version_added: 5.0.0 @@ -15,8 +13,8 @@ description: - This module creates and manages CloudWatch event rules and targets. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 author: @@ -124,9 +122,9 @@ options: type: int description: The number of tasks to create based on I(task_definition). required: false -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - amazon.aws.cloudwatchevent_rule: name: MyCronTask schedule_expression: "cron(0 20 * * ? *)" @@ -162,9 +160,9 @@ EXAMPLES = r''' - amazon.aws.cloudwatchevent_rule: name: MyCronTask state: absent -''' +""" -RETURN = r''' +RETURN = r""" rule: description: CloudWatch Event rule data. 
returned: success @@ -180,7 +178,7 @@ targets: returned: success type: list sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" -''' +""" import json @@ -192,8 +190,8 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters @@ -206,9 +204,18 @@ def _format_json(json_string): return str(json.dumps(json_string)) -class CloudWatchEventRule(object): - def __init__(self, module, name, client, schedule_expression=None, - event_pattern=None, description=None, role_arn=None): +def _validate_json(s): + try: + json.loads(s) + return True + except json.JSONDecodeError: + return False + + +class CloudWatchEventRule: + def __init__( + self, module, name, client, schedule_expression=None, event_pattern=None, description=None, role_arn=None + ): self.name = name self.client = client self.changed = False @@ -222,30 +229,33 @@ class CloudWatchEventRule(object): """Returns the existing details of the rule in AWS""" try: rule_info = self.client.describe_rule(Name=self.name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) - return self._snakify(rule_info) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg=f"Could not describe rule {self.name}") + return camel_dict_to_snake_dict(rule_info) def put(self, enabled=True): """Creates or updates the rule in AWS""" request = { - 'Name': self.name, - 'State': "ENABLED" if enabled else "DISABLED", + "Name": self.name, + "State": "ENABLED" if enabled else "DISABLED", } if self.schedule_expression: - request['ScheduleExpression'] = self.schedule_expression + request["ScheduleExpression"] = self.schedule_expression if self.event_pattern: - request['EventPattern'] = self.event_pattern + request["EventPattern"] = self.event_pattern if self.description: - request['Description'] = self.description + request["Description"] = self.description if self.role_arn: - request['RoleArn'] = self.role_arn + request["RoleArn"] = self.role_arn try: response = self.client.put_rule(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule {self.name}") self.changed = True return response @@ -256,7 +266,7 @@ class CloudWatchEventRule(object): try: response = self.client.delete_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could 
not delete rule {self.name}") self.changed = True return response @@ -265,7 +275,7 @@ class CloudWatchEventRule(object): try: response = self.client.enable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not enable rule {self.name}") self.changed = True return response @@ -274,7 +284,7 @@ class CloudWatchEventRule(object): try: response = self.client.disable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not disable rule {self.name}") self.changed = True return response @@ -282,24 +292,27 @@ class CloudWatchEventRule(object): """Lists the existing targets for the rule in AWS""" try: targets = self.client.list_targets_by_rule(Rule=self.name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return [] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) - return self._snakify(targets)['targets'] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg=f"Could not find target for rule {self.name}") + return camel_dict_to_snake_dict(targets)["targets"] def put_targets(self, targets): """Creates or updates the provided targets on the rule in AWS""" if not targets: return request = { - 'Rule': self.name, - 'Targets': self._targets_request(targets), + "Rule": self.name, + "Targets": self._targets_request(targets), } try: response = self.client.put_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule targets for rule {self.name}") self.changed = True return response @@ -307,44 +320,39 @@ class CloudWatchEventRule(object): """Removes the provided targets from the rule in AWS""" if not target_ids: return - request = { - 'Rule': self.name, - 'Ids': target_ids - } + request = {"Rule": self.name, "Ids": target_ids} try: response = self.client.remove_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not remove rule targets from rule {self.name}") self.changed = True return response def remove_all_targets(self): """Removes all targets on rule""" targets = self.list_targets() - return self.remove_targets([t['id'] for t in targets]) + return self.remove_targets([t["id"] for t in targets]) def _targets_request(self, targets): """Formats each target for the request""" targets_request = [] for target in targets: target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) - if target_request.get('Input', None): - target_request['Input'] = _format_json(target_request['Input']) - if target_request.get('InputTransformer', None): - if target_request.get('InputTransformer').get('InputTemplate', None): - 
target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate']) - if target_request.get('InputTransformer').get('InputPathsMap', None): - target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map'] + if target_request.get("Input", None): + target_request["Input"] = _format_json(target_request["Input"]) + if target_request.get("InputTransformer", None): + if target_request.get("InputTransformer").get("InputTemplate", None): + target_request["InputTransformer"]["InputTemplate"] = _format_json( + target_request["InputTransformer"]["InputTemplate"] + ) + if target_request.get("InputTransformer").get("InputPathsMap", None): + target_request["InputTransformer"]["InputPathsMap"] = target["input_transformer"]["input_paths_map"] targets_request.append(target_request) return targets_request - def _snakify(self, dict): - """Converts camel case to snake case""" - return camel_dict_to_snake_dict(dict) - -class CloudWatchEventRuleManager(object): - RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn'] +class CloudWatchEventRuleManager: + RULE_FIELDS = ["name", "event_pattern", "schedule_expression", "description", "role_arn"] def __init__(self, rule, targets): self.rule = rule @@ -376,20 +384,16 @@ class CloudWatchEventRuleManager(object): def fetch_aws_state(self): """Retrieves rule and target state from AWS""" - aws_state = { - 'rule': {}, - 'targets': [], - 'changed': self.rule.changed - } + aws_state = {"rule": {}, "targets": [], "changed": self.rule.changed} rule_description = self.rule.describe() if not rule_description: return aws_state # Don't need to include response metadata noise in response - del rule_description['response_metadata'] + del rule_description["response_metadata"] - aws_state['rule'] = rule_description - aws_state['targets'].extend(self.rule.list_targets()) + aws_state["rule"] = rule_description + aws_state["targets"].extend(self.rule.list_targets()) return aws_state def _sync_rule(self, enabled=True): @@ -412,9 +416,9 @@ class CloudWatchEventRuleManager(object): def _sync_state(self, enabled=True): """Syncs local rule state with AWS""" remote_state = self._remote_state() - if enabled and remote_state != 'ENABLED': + if enabled and remote_state != "ENABLED": self.rule.enable() - elif not enabled and remote_state != 'DISABLED': + elif not enabled and remote_state != "DISABLED": self.rule.disable() def _create(self, enabled=True): @@ -428,53 +432,69 @@ class CloudWatchEventRuleManager(object): # The rule matches AWS only if all rule data fields are equal # to their corresponding local value defined in the task - return all( - getattr(self.rule, field) == aws_rule_data.get(field, None) - for field in self.RULE_FIELDS - ) + return all(getattr(self.rule, field) == aws_rule_data.get(field, None) for field in self.RULE_FIELDS) def _targets_to_put(self): """Returns a list of targets that need to be updated or added remotely""" remote_targets = self.rule.list_targets() - return [t for t in self.targets if t not in remote_targets] + + # keys with none values must be scrubbed off of self.targets + temp = [] + for t in self.targets: + if t["input_transformer"] is not None and t["input_transformer"]["input_template"] is not None: + # The remote_targets contain quotes, so add + # quotes to temp + val = t["input_transformer"]["input_template"] + # list_targets_by_rule return input_template as string + # if existing value is string "<instance> is in state 
<state>", it returns '"<instance> is in state <state>"' + # if existing value is <JSON>, it returns '<JSON>' + # therefore add quotes to provided input_template value only if it is not a JSON + valid_json = _validate_json(val) + if not valid_json: + t["input_transformer"]["input_template"] = '"' + val + '"' + temp.append(scrub_none_parameters(t)) + self.targets = temp + # remote_targets is snakified output of client.list_targets_by_rule() + # therefore snakified version of t should be compared to avoid wrong result of below conditional + return [t for t in self.targets if camel_dict_to_snake_dict(t) not in remote_targets] def _remote_target_ids_to_remove(self): """Returns a list of targets that need to be removed remotely""" - target_ids = [t['id'] for t in self.targets] + target_ids = [t["id"] for t in self.targets] remote_targets = self.rule.list_targets() - return [ - rt['id'] for rt in remote_targets if rt['id'] not in target_ids - ] + return [rt["id"] for rt in remote_targets if rt["id"] not in target_ids] def _remote_state(self): """Returns the remote state from AWS""" description = self.rule.describe() if not description: return - return description['state'] + return description["state"] def main(): target_args = dict( - type='list', elements='dict', default=[], + type="list", + elements="dict", + default=[], options=dict( - id=dict(type='str', required=True), - arn=dict(type='str', required=True), - role_arn=dict(type='str'), - input=dict(type='json'), - input_path=dict(type='str'), + id=dict(type="str", required=True), + arn=dict(type="str", required=True), + role_arn=dict(type="str"), + input=dict(type="json"), + input_path=dict(type="str"), input_transformer=dict( - type='dict', + type="dict", options=dict( - input_paths_map=dict(type='dict'), - input_template=dict(type='json'), + input_paths_map=dict(type="dict"), + input_template=dict(type="json"), ), ), ecs_parameters=dict( - type='dict', + type="dict", options=dict( - task_definition_arn=dict(type='str', required=True), - task_count=dict(type='int'), + task_definition_arn=dict(type="str", required=True), + task_count=dict(type="int"), ), ), ), @@ -482,36 +502,33 @@ def main(): argument_spec = dict( name=dict(required=True), schedule_expression=dict(), - event_pattern=dict(type='json'), - state=dict(choices=['present', 'disabled', 'absent'], - default='present'), + event_pattern=dict(type="json"), + state=dict(choices=["present", "disabled", "absent"], default="present"), description=dict(), role_arn=dict(), targets=target_args, ) module = AnsibleAWSModule(argument_spec=argument_spec) - rule_data = dict( - [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS] - ) - targets = module.params.get('targets') - state = module.params.get('state') - client = module.client('events') + rule_data = dict([(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]) + targets = module.params.get("targets") + state = module.params.get("state") + client = module.client("events") cwe_rule = CloudWatchEventRule(module, client=client, **rule_data) cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets) - if state == 'present': + if state == "present": cwe_rule_manager.ensure_present() - elif state == 'disabled': + elif state == "disabled": cwe_rule_manager.ensure_disabled() - elif state == 'absent': + elif state == "absent": cwe_rule_manager.ensure_absent() else: - module.fail_json(msg="Invalid state '{0}' provided".format(state)) + module.fail_json(msg=f"Invalid state '{state}' provided") 
module.exit_json(**cwe_rule_manager.fetch_aws_state()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py index ee6df826e..f499c478f 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group version_added: 5.0.0 @@ -60,14 +57,13 @@ options: required: false type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - amazon.aws.cloudwatchlogs_log_group: @@ -76,21 +72,24 @@ EXAMPLES = ''' - amazon.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } + tags: + Name: "test-log-group" + Env: "QA" - amazon.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } + tags: + Name: "test-log-group" + Env: QA kms_key_id: arn:aws:kms:region:account-id:key/key-id - amazon.aws.cloudwatchlogs_log_group: state: absent log_group_name: test-log-group +""" -''' - -RETURN = ''' +RETURN = r""" log_groups: description: Return the list of complex objects representing log groups returned: success @@ -130,7 +129,7 @@ log_groups: description: A dictionary representing the tags on the log group. 
returned: always type: dict -''' +""" try: import botocore @@ -139,17 +138,17 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): - request = {'logGroupName': log_group_name} + request = {"logGroupName": log_group_name} if kms_key_id: - request['kmsKeyId'] = kms_key_id + request["kmsKeyId"] = kms_key_id if tags: - request['tags'] = tags + request["tags"] = tags if module.check_mode: module.exit_json(changed=True, msg="Would have created log group if not in check_mode.") @@ -160,9 +159,7 @@ def create_log_group(client, log_group_name, kms_key_id, tags, retention, module module.fail_json_aws(e, msg="Unable to create log group") if retention: - input_retention_policy(client=client, - log_group_name=log_group_name, - retention=retention, module=module) + input_retention_policy(client=client, log_group_name=log_group_name, retention=retention, module=module) found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module) @@ -176,13 +173,17 @@ def input_retention_policy(client, log_group_name, retention, module): permited_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653] if retention in permited_values: - response = client.put_retention_policy(logGroupName=log_group_name, - retentionInDays=retention) + client.put_retention_policy(logGroupName=log_group_name, retentionInDays=retention) else: delete_log_group(client=client, log_group_name=log_group_name, module=module) - module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") + module.fail_json( + msg=( + "Invalid retention value. 
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400," + " 545, 731, 1827, 3653]" + ) + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to put retention policy for log group {log_group_name}") def delete_retention_policy(client, log_group_name, module): @@ -192,7 +193,7 @@ def delete_retention_policy(client, log_group_name, module): try: client.delete_retention_policy(logGroupName=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to delete retention policy for log group {log_group_name}") def delete_log_group(client, log_group_name, module): @@ -201,19 +202,22 @@ def delete_log_group(client, log_group_name, module): try: client.delete_log_group(logGroupName=log_group_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to delete log group {log_group_name}") def describe_log_group(client, log_group_name, module): try: desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") - matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name] + matching_logs = [log for log in desc_log_group.get("logGroups", []) if log["logGroupName"] == log_group_name] if not matching_logs: return {} @@ -222,20 +226,23 @@ def describe_log_group(client, log_group_name, module): try: tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - - found_log_group['tags'] = tags.get('tags', {}) + module.warn(f"Permission denied listing tags for log group {log_group_name}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") + + found_log_group["tags"] = tags.get("tags", {}) return found_log_group def format_result(found_log_group): # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group # Return both to avoid a breaking change. 
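# (Illustrative sketch, not part of the upstream change: given a described group such as
# {"logGroupName": "test-log-group", "retentionInDays": 30}, the function below returns
# {"log_groups": [{"log_group_name": "test-log-group", "retention_in_days": 30}],
#  "log_group_name": "test-log-group", "retention_in_days": 30}, i.e. the documented
# list plus the legacy splatted keys.)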
- log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags']) + log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=["tags"]) return dict(log_groups=[log_group], **log_group) @@ -243,8 +250,8 @@ def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): if desired_tags is None: return False - group_name = module.params.get('log_group_name') - current_tags = found_log_group.get('tags', {}) + group_name = module.params.get("log_group_name") + current_tags = found_log_group.get("tags", {}) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags) if not tags_to_add and not tags_to_remove: @@ -258,94 +265,101 @@ def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): if tags_to_add: client.tag_log_group(logGroupName=group_name, tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update tags') + module.fail_json_aws(e, msg="Failed to update tags") return True def main(): argument_spec = dict( - log_group_name=dict(required=True, type='str'), - state=dict(choices=['present', 'absent'], - default='present'), - kms_key_id=dict(required=False, type='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - retention=dict(required=False, type='int'), - purge_retention_policy=dict(required=False, type='bool', default=False), - overwrite=dict(required=False, type='bool', default=False), + log_group_name=dict(required=True, type="str"), + state=dict(choices=["present", "absent"], default="present"), + kms_key_id=dict(required=False, type="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), + retention=dict(required=False, type="int"), + purge_retention_policy=dict(required=False, type="bool", default=False), + overwrite=dict(required=False, type="bool", default=False), ) - mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] - module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) + mutually_exclusive = [["retention", "purge_retention_policy"], ["purge_retention_policy", "overwrite"]] + module = AnsibleAWSModule( + supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive + ) try: - logs = module.client('logs') + logs = module.client("logs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') + state = module.params.get("state") changed = False # Determine if the log group exists - found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) + found_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) - if state == 'present': + if state == "present": if found_log_group: - if module.params['overwrite'] is True: + if module.params["overwrite"] is True: changed = True - delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - 
tags=module.params['tags'], - retention=module.params['retention'], - module=module) + delete_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) + found_log_group = create_log_group( + client=logs, + log_group_name=module.params["log_group_name"], + kms_key_id=module.params["kms_key_id"], + tags=module.params["tags"], + retention=module.params["retention"], + module=module, + ) else: - changed |= ensure_tags(client=logs, - found_log_group=found_log_group, - desired_tags=module.params['tags'], - purge_tags=module.params['purge_tags'], - module=module) - if module.params['purge_retention_policy']: - if found_log_group.get('retentionInDays'): + changed |= ensure_tags( + client=logs, + found_log_group=found_log_group, + desired_tags=module.params["tags"], + purge_tags=module.params["purge_tags"], + module=module, + ) + if module.params["purge_retention_policy"]: + if found_log_group.get("retentionInDays"): changed = True - delete_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - elif module.params['retention'] != found_log_group.get('retentionInDays'): - if module.params['retention'] is not None: + delete_retention_policy( + client=logs, log_group_name=module.params["log_group_name"], module=module + ) + elif module.params["retention"] != found_log_group.get("retentionInDays"): + if module.params["retention"] is not None: changed = True - input_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - retention=module.params['retention'], - module=module) + input_retention_policy( + client=logs, + log_group_name=module.params["log_group_name"], + retention=module.params["retention"], + module=module, + ) if changed: - found_log_group = describe_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) + found_log_group = describe_log_group( + client=logs, log_group_name=module.params["log_group_name"], module=module + ) elif not found_log_group: changed = True - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - tags=module.params['tags'], - retention=module.params['retention'], - module=module) + found_log_group = create_log_group( + client=logs, + log_group_name=module.params["log_group_name"], + kms_key_id=module.params["kms_key_id"], + tags=module.params["tags"], + retention=module.params["retention"], + module=module, + ) result = format_result(found_log_group) module.exit_json(changed=changed, **result) - elif state == 'absent': + elif state == "absent": if found_log_group: changed = True - delete_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) + delete_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py index cb4c3808a..0cfe22e22 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import 
absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group_info version_added: 5.0.0 @@ -23,18 +20,18 @@ options: - The name or prefix of the log group to filter by. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - amazon.aws.cloudwatchlogs_log_group_info: log_group_name: test-log-group -''' +""" -RETURN = ''' +RETURN = r""" log_groups: description: Return the list of complex objects representing log groups returned: success @@ -74,7 +71,7 @@ log_groups: type: dict version_added: 4.0.0 version_added_collection: community.aws -''' +""" try: import botocore @@ -83,30 +80,33 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def describe_log_group(client, log_group_name, module): params = {} if log_group_name: - params['logGroupNamePrefix'] = log_group_name + params["logGroupNamePrefix"] = log_group_name try: - paginator = client.get_paginator('describe_log_groups') + paginator = client.get_paginator("describe_log_groups") desc_log_group = paginator.paginate(**params).build_full_result() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") - for log_group in desc_log_group['logGroups']: - log_group_name = log_group['logGroupName'] + for log_group in desc_log_group["logGroups"]: + log_group_name = log_group["logGroupName"] try: tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - log_group['tags'] = tags.get('tags', {}) + module.warn(f"Permission denied listing tags for log group {log_group_name}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") + log_group["tags"] = tags.get("tags", {}) return desc_log_group @@ -119,21 +119,19 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - logs = module.client('logs') + logs = module.client("logs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - desc_log_group = describe_log_group(client=logs, - 
log_group_name=module.params['log_group_name'], - module=module) + desc_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) final_log_group_snake = [] - for log_group in desc_log_group['logGroups']: - final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags'])) + for log_group in desc_log_group["logGroups"]: + final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=["tags"])) desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) module.exit_json(**desc_log_group_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py index 82435f4cb..b8bf0884b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group_metric_filter version_added: 5.0.0 @@ -59,13 +58,12 @@ options: - The value to emit when a filter pattern does not match a log event. type: float extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: set metric filter on log group /fluentd/testcase amazon.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase @@ -73,18 +71,18 @@ EXAMPLES = ''' filter_pattern: '{($.value = *) && ($.hostname = "box")}' state: present metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: "$.value" + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: "$.value" - name: delete metric filter on log group /fluentd/testcase amazon.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase filter_name: BoxFreeStorage state: absent -''' +""" -RETURN = """ +RETURN = r""" metric_filters: description: Return the origin response value returned: success @@ -97,20 +95,17 @@ metric_filters: "metric_value": "$.value" } ] - """ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def metricTransformationHandler(metricTransformations, originMetricTransformations=None): - if originMetricTransformations: change = False - originMetricTransformations = camel_dict_to_snake_dict( - originMetricTransformations) + originMetricTransformations = camel_dict_to_snake_dict(originMetricTransformations) for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]: if metricTransformations.get(item) != originMetricTransformations.get(item): change = True @@ -121,18 +116,18 @@ def metricTransformationHandler(metricTransformations, originMetricTransformatio if isinstance(defaultValue, int) or isinstance(defaultValue, float): retval = [ { - 
'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), - 'defaultValue': defaultValue + "metricName": metricTransformations.get("metric_name"), + "metricNamespace": metricTransformations.get("metric_namespace"), + "metricValue": metricTransformations.get("metric_value"), + "defaultValue": defaultValue, } ] else: retval = [ { - 'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), + "metricName": metricTransformations.get("metric_name"), + "metricNamespace": metricTransformations.get("metric_namespace"), + "metricValue": metricTransformations.get("metric_value"), } ] @@ -140,24 +135,26 @@ def metricTransformationHandler(metricTransformations, originMetricTransformatio def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - log_group_name=dict(type='str', required=True), - filter_name=dict(type='str', required=True), - filter_pattern=dict(type='str'), - metric_transformation=dict(type='dict', options=dict( - metric_name=dict(type='str'), - metric_namespace=dict(type='str'), - metric_value=dict(type='str'), - default_value=dict(type='float') - )), + state=dict(type="str", required=True, choices=["present", "absent"]), + log_group_name=dict(type="str", required=True), + filter_name=dict(type="str", required=True), + filter_pattern=dict(type="str"), + metric_transformation=dict( + type="dict", + options=dict( + metric_name=dict(type="str"), + metric_namespace=dict(type="str"), + metric_value=dict(type="str"), + default_value=dict(type="float"), + ), + ), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])] + required_if=[("state", "present", ["metric_transformation", "filter_pattern"])], ) log_group_name = module.params.get("log_group_name") @@ -166,19 +163,14 @@ def main(): metric_transformation = module.params.get("metric_transformation") state = module.params.get("state") - cwl = module.client('logs') + cwl = module.client("logs") # check if metric filter exists - response = cwl.describe_metric_filters( - logGroupName=log_group_name, - filterNamePrefix=filter_name - ) + response = cwl.describe_metric_filters(logGroupName=log_group_name, filterNamePrefix=filter_name) if len(response.get("metricFilters")) == 1: - originMetricTransformations = response.get( - "metricFilters")[0].get("metricTransformations")[0] - originFilterPattern = response.get("metricFilters")[ - 0].get("filterPattern") + originMetricTransformations = response.get("metricFilters")[0].get("metricTransformations")[0] + originFilterPattern = response.get("metricFilters")[0].get("filterPattern") else: originMetricTransformations = None originFilterPattern = None @@ -187,16 +179,14 @@ def main(): if state == "absent" and originMetricTransformations: if not module.check_mode: - response = cwl.delete_metric_filter( - logGroupName=log_group_name, - filterName=filter_name - ) + response = cwl.delete_metric_filter(logGroupName=log_group_name, filterName=filter_name) change = True metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]] elif state == "present": metricTransformation, change = metricTransformationHandler( - metricTransformations=metric_transformation, 
originMetricTransformations=originMetricTransformations) + metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations + ) change = change or filter_pattern != originFilterPattern @@ -206,7 +196,7 @@ def main(): logGroupName=log_group_name, filterName=filter_name, filterPattern=filter_pattern, - metricTransformations=metricTransformation + metricTransformations=metricTransformation, ) metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation] @@ -214,5 +204,5 @@ def main(): module.exit_json(changed=change, metric_filters=metricTransformation) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py index 537277c34..00ead5ce5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami version_added: 1.0.0 @@ -117,12 +115,33 @@ options: type: bool launch_permissions: description: - - Users and groups that should be able to launch the AMI. - - Expects dictionary with a key of C(user_ids) and/or C(group_names). - - C(user_ids) should be a list of account IDs. - - C(group_name) should be a list of groups, C(all) is the only acceptable value currently. + - Launch permissions for the AMI. - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users). + required: false type: dict + suboptions: + user_ids: + description: List of account IDs. + type: list + elements: str + required: false + group_names: + description: List of group names. + type: list + elements: str + required: false + org_arns: + description: List of Amazon Resource Names (ARNs) of organizations. + type: list + elements: str + required: false + version_added: 6.5.0 + org_unit_arns: + description: List of Amazon Resource Names (ARNs) of organizational units (OUs). + type: list + elements: str + required: false + version_added: 6.5.0 image_location: description: - The S3 location of an image to use for the AMI. @@ -174,15 +193,15 @@ author: - "Ross Williams (@gunzy83) <gunzy83au@gmail.com>" - "Willem van Ketwich (@wilvk) <willvk@gmail.com>" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" # Thank you to iAcquire for sponsoring development of this module. -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
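# An extra illustrative task, not part of the upstream commit (the timeout value is a
# placeholder): deregistering an AMI and blocking until it is gone, using the
# wait/wait_timeout options documented above.
- name: Deregister AMI and wait until it disappears
  amazon.aws.ec2_ami:
    image_id: "{{ instance.image_id }}"
    state: absent
    wait: true
    wait_timeout: 600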
- name: Basic AMI Creation @@ -197,7 +216,7 @@ EXAMPLES = ''' - name: Basic AMI Creation, without waiting amazon.aws.ec2_ami: instance_id: i-xxxxxx - wait: no + wait: false name: newtest - name: AMI Registration from EBS Snapshot @@ -219,26 +238,26 @@ EXAMPLES = ''' instance_id: i-xxxxxx name: newtest device_mapping: - - device_name: /dev/sda1 - size: XXX - delete_on_termination: true - volume_type: gp2 - - device_name: /dev/sdb - size: YYY - delete_on_termination: false - volume_type: gp2 + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + size: YYY + delete_on_termination: false + volume_type: gp2 - name: AMI Creation, excluding a volume attached at /dev/sdb amazon.aws.ec2_ami: instance_id: i-xxxxxx name: newtest device_mapping: - - device_name: /dev/sda1 - size: XXX - delete_on_termination: true - volume_type: gp2 - - device_name: /dev/sdb - no_device: true + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + no_device: true - name: AMI Creation with boot_mode and tpm_support amazon.aws.ec2_ami: @@ -248,9 +267,9 @@ EXAMPLES = ''' virtualization_type: hvm root_device_name: /dev/sda1 device_mapping: - - device_name: /dev/sda1 - snapshot_id: "{{ snapshot_id }}" - wait: yes + - device_name: /dev/sda1 + snapshot_id: "{{ snapshot_id }}" + wait: true region: us-east-1 boot_mode: uefi uefi_data: data_file.bin @@ -259,13 +278,13 @@ EXAMPLES = ''' - name: Deregister/Delete AMI (keep associated snapshots) amazon.aws.ec2_ami: image_id: "{{ instance.image_id }}" - delete_snapshot: False + delete_snapshot: false state: absent - name: Deregister AMI (delete associated snapshots too) amazon.aws.ec2_ami: image_id: "{{ instance.image_id }}" - delete_snapshot: True + delete_snapshot: true state: absent - name: Update AMI Launch Permissions, making it public @@ -281,9 +300,17 @@ EXAMPLES = ''' state: present launch_permissions: user_ids: ['123456789012'] -''' -RETURN = ''' +- name: Update AMI Launch Permissions, share AMI across an Organization and Organizational Units + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + org_arns: ['arn:aws:organizations::123456789012:organization/o-123ab4cdef'] + org_unit_arns: ['arn:aws:organizations::123456789012:ou/o-123example/ou-1234-5example'] +""" + +RETURN = r""" architecture: description: Architecture of image. 
returned: when AMI is created or already exists @@ -389,7 +416,7 @@ snapshots_deleted: "snap-fbcccb8f", "snap-cfe7cdb4" ] -''' +""" import time @@ -400,33 +427,40 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +class Ec2AmiFailure(Exception): + def __init__(self, message=None, original_e=None): + super().__init__(message) + self.original_e = original_e + self.message = message + + def get_block_device_mapping(image): - bdm_dict = dict() - if image is not None and image.get('block_device_mappings') is not None: - bdm = image.get('block_device_mappings') + bdm_dict = {} + if image is not None and image.get("block_device_mappings") is not None: + bdm = image.get("block_device_mappings") for device in bdm: - device_name = device.get('device_name') - if 'ebs' in device: + device_name = device.get("device_name") + if "ebs" in device: ebs = device.get("ebs") bdm_dict_item = { - 'size': ebs.get("volume_size"), - 'snapshot_id': ebs.get("snapshot_id"), - 'volume_type': ebs.get("volume_type"), - 'encrypted': ebs.get("encrypted"), - 'delete_on_termination': ebs.get("delete_on_termination") + "size": ebs.get("volume_size"), + "snapshot_id": ebs.get("snapshot_id"), + "volume_type": ebs.get("volume_type"), + "encrypted": ebs.get("encrypted"), + "delete_on_termination": ebs.get("delete_on_termination"), } - elif 'virtual_name' in device: - bdm_dict_item = dict(virtual_name=device['virtual_name']) + elif "virtual_name" in device: + bdm_dict_item = dict(virtual_name=device["virtual_name"]) bdm_dict[device_name] = bdm_dict_item return bdm_dict @@ -448,7 +482,7 @@ def get_ami_info(camel_image): root_device_type=image.get("root_device_type"), virtualization_type=image.get("virtualization_type"), name=image.get("name"), - tags=boto3_tag_list_to_ansible_dict(image.get('tags')), + tags=boto3_tag_list_to_ansible_dict(image.get("tags")), platform=image.get("platform"), enhanced_networking=image.get("ena_support"), image_owner_alias=image.get("image_owner_alias"), @@ -458,363 +492,526 @@ def get_ami_info(camel_image): ramdisk_id=image.get("ramdisk_id"), sriov_net_support=image.get("sriov_net_support"), state_reason=image.get("state_reason"), - launch_permissions=image.get('launch_permissions') + launch_permissions=image.get("launch_permissions"), ) -def create_image(module, connection): - instance_id = module.params.get('instance_id') - name = module.params.get('name') - wait = module.params.get('wait') - wait_timeout = 
module.params.get('wait_timeout') - description = module.params.get('description') - architecture = module.params.get('architecture') - kernel_id = module.params.get('kernel_id') - root_device_name = module.params.get('root_device_name') - virtualization_type = module.params.get('virtualization_type') - no_reboot = module.params.get('no_reboot') - device_mapping = module.params.get('device_mapping') - tags = module.params.get('tags') - launch_permissions = module.params.get('launch_permissions') - image_location = module.params.get('image_location') - enhanced_networking = module.params.get('enhanced_networking') - billing_products = module.params.get('billing_products') - ramdisk_id = module.params.get('ramdisk_id') - sriov_net_support = module.params.get('sriov_net_support') - boot_mode = module.params.get('boot_mode') - tpm_support = module.params.get('tpm_support') - uefi_data = module.params.get('uefi_data') - - if tpm_support and boot_mode != 'uefi': - module.fail_json(msg="To specify 'tpm_support', 'boot_mode' must be 'uefi'.") - - if module.check_mode: - image = connection.describe_images(Filters=[{'Name': 'name', 'Values': [str(name)]}]) - if not image['Images']: - module.exit_json(changed=True, msg='Would have created a AMI if not in check mode.') - else: - module.exit_json(changed=False, msg='Error registering image: AMI name is already in use by another AMI') +def get_image_by_id(connection, image_id): + try: + images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error retrieving image by image_id", e) + images = images_response.get("Images", []) + image_counter = len(images) + if image_counter == 0: + return None + + if image_counter > 1: + raise Ec2AmiFailure(f"Invalid number of instances ({str(len(images))}) found for image_id: {image_id}.") + + result = images[0] try: - params = { - 'Name': name, - 'Description': description - } + result["LaunchPermissions"] = connection.describe_image_attribute( + aws_retry=True, Attribute="launchPermission", ImageId=image_id + )["LaunchPermissions"] + result["ProductCodes"] = connection.describe_image_attribute( + aws_retry=True, Attribute="productCodes", ImageId=image_id + )["ProductCodes"] + except is_boto3_error_code("InvalidAMIID.Unavailable"): + pass + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise Ec2AmiFailure(f"Error retrieving image attributes for image {image_id}", e) + return result - block_device_mapping = None - # Remove empty values injected by using options - if device_mapping: - block_device_mapping = [] - for device in device_mapping: - device = dict((k, v) for k, v in device.items() if v is not None) - device['Ebs'] = {} - device = rename_item_if_exists(device, 'device_name', 'DeviceName') - device = rename_item_if_exists(device, 'virtual_name', 'VirtualName') - device = rename_item_if_exists(device, 'no_device', 'NoDevice') - device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs') - device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs') - device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs') - device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int) - device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int) - device = rename_item_if_exists(device, 'iops', 
'Iops', 'Ebs') - device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs') - - # The NoDevice parameter in Boto3 is a string. Empty string omits the device from block device mapping - # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image - if 'NoDevice' in device: - if device['NoDevice'] is True: - device['NoDevice'] = "" - else: - del device['NoDevice'] - block_device_mapping.append(device) - if block_device_mapping: - params['BlockDeviceMappings'] = block_device_mapping - if instance_id: - params['InstanceId'] = instance_id - params['NoReboot'] = no_reboot - tag_spec = boto3_tag_specifications(tags, types=['image', 'snapshot']) - if tag_spec: - params['TagSpecifications'] = tag_spec - image_id = connection.create_image(aws_retry=True, **params).get('ImageId') + +def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): + new_item = dict_object.get(attribute) + if new_item is not None: + if attribute_type is not None: + new_item = attribute_type(new_item) + if child_node is None: + dict_object[new_attribute] = new_item else: - if architecture: - params['Architecture'] = architecture - if virtualization_type: - params['VirtualizationType'] = virtualization_type - if image_location: - params['ImageLocation'] = image_location - if enhanced_networking: - params['EnaSupport'] = enhanced_networking - if billing_products: - params['BillingProducts'] = billing_products - if ramdisk_id: - params['RamdiskId'] = ramdisk_id - if sriov_net_support: - params['SriovNetSupport'] = sriov_net_support - if kernel_id: - params['KernelId'] = kernel_id - if root_device_name: - params['RootDeviceName'] = root_device_name - if boot_mode: - params['BootMode'] = boot_mode - if tpm_support: - params['TpmSupport'] = tpm_support - if uefi_data: - params['UefiData'] = uefi_data - image_id = connection.register_image(aws_retry=True, **params).get('ImageId') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error registering image") + dict_object[child_node][new_attribute] = new_item + dict_object.pop(attribute) + return dict_object - if wait: - delay = 15 - max_attempts = wait_timeout // delay - waiter = get_waiter(connection, 'image_available') - waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)) - - if tags and 'TagSpecifications' not in params: - image_info = get_image_by_id(module, connection, image_id) - add_ec2_tags(connection, module, image_id, tags) - if image_info and image_info.get('BlockDeviceMappings'): - for mapping in image_info.get('BlockDeviceMappings'): - # We can only tag Ebs volumes - if 'Ebs' not in mapping: - continue - add_ec2_tags(connection, module, mapping.get('Ebs').get('SnapshotId'), tags) - if launch_permissions: - try: - params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list())) - for group_name in launch_permissions.get('group_names', []): - params['LaunchPermission']['Add'].append(dict(Group=group_name)) - for user_id in launch_permissions.get('user_ids', []): - params['LaunchPermission']['Add'].append(dict(UserId=str(user_id))) - if params['LaunchPermission']['Add']: - connection.modify_image_attribute(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id) - - module.exit_json(msg="AMI 
creation operation complete.", changed=True, - **get_ami_info(get_image_by_id(module, connection, image_id))) - - -def deregister_image(module, connection): - image_id = module.params.get('image_id') - delete_snapshot = module.params.get('delete_snapshot') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - image = get_image_by_id(module, connection, image_id) - - if image is None: - module.exit_json(changed=False) - - # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable. - snapshots = [] - if 'BlockDeviceMappings' in image: - for mapping in image.get('BlockDeviceMappings'): - snapshot_id = mapping.get('Ebs', {}).get('SnapshotId') - if snapshot_id is not None: - snapshots.append(snapshot_id) - - # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. - if 'ImageId' in image: - if module.check_mode: - module.exit_json(changed=True, msg='Would have deregistered AMI if not in check mode.') - try: - connection.deregister_image(aws_retry=True, ImageId=image_id) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error deregistering image") - else: - module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False) +def validate_params( + module, + image_id=None, + instance_id=None, + name=None, + state=None, + tpm_support=None, + uefi_data=None, + boot_mode=None, + device_mapping=None, + **_, +): + # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by + # the required_if for state=absent, so check manually instead + if not (image_id or name): + module.fail_json("one of the following is required: name, image_id") - image = get_image_by_id(module, connection, image_id) - wait_timeout = time.time() + wait_timeout + if tpm_support and boot_mode != "uefi": + module.fail_json("To specify 'tpm_support', 'boot_mode' must be 'uefi'.") - while wait and wait_timeout > time.time() and image is not None: - image = get_image_by_id(module, connection, image_id) - time.sleep(3) + if state == "present" and not image_id and not (instance_id or device_mapping): + module.fail_json( + "The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image." 
+ ) - if wait and wait_timeout <= time.time(): - module.fail_json(msg="Timed out waiting for image to be deregistered.") - exit_params = {'msg': "AMI deregister operation complete.", 'changed': True} +class DeregisterImage: + @staticmethod + def do_check_mode(module, connection, image_id): + image = get_image_by_id(connection, image_id) - if delete_snapshot: - for snapshot_id in snapshots: + if image is None: + module.exit_json(changed=False) + + if "ImageId" in image: + module.exit_json(changed=True, msg="Would have deregistered AMI if not in check mode.") + else: + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) + + @staticmethod + def defer_purge_snapshots(image): + def purge_snapshots(connection): try: - connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) - # Don't error out if root volume snapshot was already deregistered as part of deregister_image - except is_boto3_error_code('InvalidSnapshot.NotFound'): + for mapping in image.get("BlockDeviceMappings") or []: + snapshot_id = mapping.get("Ebs", {}).get("SnapshotId") + if snapshot_id is None: + continue + connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) + yield snapshot_id + except is_boto3_error_code("InvalidSnapshot.NotFound"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete snapshot.') - exit_params['snapshots_deleted'] = snapshots + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise Ec2AmiFailure("Failed to delete snapshot.", e) + + return purge_snapshots + + @staticmethod + def timeout(connection, image_id, wait_timeout): + image = get_image_by_id(connection, image_id) + wait_till = time.time() + wait_timeout + + while wait_till > time.time() and image is not None: + image = get_image_by_id(connection, image_id) + time.sleep(3) + + if wait_till <= time.time(): + raise Ec2AmiFailure("Timed out waiting for image to be deregistered.") + + @classmethod + def do(cls, module, connection, image_id): + """Entry point to deregister an image""" + delete_snapshot = module.params.get("delete_snapshot") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + image = get_image_by_id(connection, image_id) + + if image is None: + module.exit_json(changed=False) + + # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable. + purge_snapshots = cls.defer_purge_snapshots(image) + + # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. 
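# (Clarifying note, inferred from the code rather than stated in the patch: such an
# already-deregistered image comes back without an "ImageId" key, so the membership
# test just below is what keeps repeated state=absent runs idempotent.)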
+ if "ImageId" in image: + try: + connection.deregister_image(aws_retry=True, ImageId=image_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error deregistering image", e) + else: + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) + + if wait: + cls.timeout(connection, image_id, wait_timeout) - module.exit_json(**exit_params) + exit_params = {"msg": "AMI deregister operation complete.", "changed": True} + if delete_snapshot: + exit_params["snapshots_deleted"] = list(purge_snapshots(connection)) -def update_image(module, connection, image_id): - launch_permissions = module.params.get('launch_permissions') - image = get_image_by_id(module, connection, image_id) - if image is None: - module.fail_json(msg="Image %s does not exist" % image_id, changed=False) - changed = False + module.exit_json(**exit_params) - if launch_permissions is not None: - current_permissions = image['LaunchPermissions'] - current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission) - desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', [])) - current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission) - desired_groups = set(launch_permissions.get('group_names', [])) +class UpdateImage: + @staticmethod + def set_launch_permission(connection, image, launch_permissions, check_mode): + if launch_permissions is None: + return False + + current_permissions = image["LaunchPermissions"] + + current_users = set(permission["UserId"] for permission in current_permissions if "UserId" in permission) + desired_users = set(str(user_id) for user_id in launch_permissions.get("user_ids", [])) + current_groups = set(permission["Group"] for permission in current_permissions if "Group" in permission) + desired_groups = set(launch_permissions.get("group_names", [])) + current_org_arns = set( + permission["OrganizationArn"] for permission in current_permissions if "OrganizationArn" in permission + ) + desired_org_arns = set(str(org_arn) for org_arn in launch_permissions.get("org_arns", [])) + current_org_unit_arns = set( + permission["OrganizationalUnitArn"] + for permission in current_permissions + if "OrganizationalUnitArn" in permission + ) + desired_org_unit_arns = set(str(org_unit_arn) for org_unit_arn in launch_permissions.get("org_unit_arns", [])) to_add_users = desired_users - current_users to_remove_users = current_users - desired_users to_add_groups = desired_groups - current_groups to_remove_groups = current_groups - desired_groups + to_add_org_arns = desired_org_arns - current_org_arns + to_remove_org_arns = current_org_arns - desired_org_arns + to_add_org_unit_arns = desired_org_unit_arns - current_org_unit_arns + to_remove_org_unit_arns = current_org_unit_arns - desired_org_unit_arns + + to_add = ( + [dict(Group=group) for group in sorted(to_add_groups)] + + [dict(UserId=user_id) for user_id in sorted(to_add_users)] + + [dict(OrganizationArn=org_arn) for org_arn in sorted(to_add_org_arns)] + + [dict(OrganizationalUnitArn=org_unit_arn) for org_unit_arn in sorted(to_add_org_unit_arns)] + ) + + to_remove = ( + [dict(Group=group) for group in sorted(to_remove_groups)] + + [dict(UserId=user_id) for user_id in sorted(to_remove_users)] + + [dict(OrganizationArn=org_arn) for org_arn in sorted(to_remove_org_arns)] + + [dict(OrganizationalUnitArn=org_unit_arn) for org_unit_arn in sorted(to_remove_org_unit_arns)] + ) + + 
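# A worked illustration with hypothetical values, not part of the upstream patch: with
# current_users == {"111111111111"}, desired_users == {"222222222222"} and no group or
# organization entries, the lists built above are
#   to_add == [{"UserId": "222222222222"}]
#   to_remove == [{"UserId": "111111111111"}]
# so the single modify_image_attribute call below reconciles both directions at once.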
if not (to_add or to_remove): + return False + + try: + if not check_mode: + connection.modify_image_attribute( + aws_retry=True, + ImageId=image["ImageId"], + Attribute="launchPermission", + LaunchPermission=dict(Add=to_add, Remove=to_remove), + ) + changed = True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure(f"Error updating launch permissions of image {image['ImageId']}", e) + return changed - to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users] - to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users] + @staticmethod + def set_tags(connection, module, image_id, tags, purge_tags): + if not tags: + return False - if to_add or to_remove: - try: - if not module.check_mode: - connection.modify_image_attribute(aws_retry=True, - ImageId=image_id, Attribute='launchPermission', - LaunchPermission=dict(Add=to_add, Remove=to_remove)) - changed = True - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id) + return ensure_ec2_tags(connection, module, image_id, tags=tags, purge_tags=purge_tags) - desired_tags = module.params.get('tags') - if desired_tags is not None: - changed |= ensure_ec2_tags(connection, module, image_id, tags=desired_tags, purge_tags=module.params.get('purge_tags')) + @staticmethod + def set_description(connection, module, image, description): + if not description: + return False + + if description == image["Description"]: + return False - description = module.params.get('description') - if description and description != image['Description']: try: if not module.check_mode: - connection.modify_image_attribute(aws_retry=True, Attribute='Description ', ImageId=image_id, Description=dict(Value=description)) - changed = True + connection.modify_image_attribute( + aws_retry=True, + Attribute="Description", + ImageId=image["ImageId"], + Description={"Value": description}, + ) + return True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error setting description for image %s" % image_id) + raise Ec2AmiFailure(f"Error setting description for image {image['ImageId']}", e) + + @classmethod + def do(cls, module, connection, image_id): + """Entry point to update an image""" + launch_permissions = module.params.get("launch_permissions") + # remove any keys with value=None + if launch_permissions: + launch_permissions = {k: v for k, v in launch_permissions.items() if v is not None} + + image = get_image_by_id(connection, image_id) + if image is None: + raise Ec2AmiFailure(f"Image {image_id} does not exist") + + changed = False + changed |= cls.set_launch_permission(connection, image, launch_permissions, module.check_mode) + changed |= cls.set_tags(connection, module, image_id, module.params["tags"], module.params["purge_tags"]) + changed |= cls.set_description(connection, module, image, module.params["description"]) + + if changed and module.check_mode: + module.exit_json(changed=True, msg="Would have updated AMI if not in check mode.") + elif changed: + module.exit_json(msg="AMI updated.", changed=True, **get_ami_info(get_image_by_id(connection, image_id))) + else: + module.exit_json(msg="AMI not updated.", changed=False, **get_ami_info(image)) - if changed: - if module.check_mode: - module.exit_json(changed=True, msg='Would have 
updated AMI if not in check mode.') - module.exit_json(msg="AMI updated.", changed=True, - **get_ami_info(get_image_by_id(module, connection, image_id))) - else: - module.exit_json(msg="AMI not updated.", changed=False, - **get_ami_info(get_image_by_id(module, connection, image_id))) +class CreateImage: + @staticmethod + def do_check_mode(module, connection, _image_id): + image = connection.describe_images(Filters=[{"Name": "name", "Values": [str(module.params["name"])]}]) + if not image["Images"]: + module.exit_json(changed=True, msg="Would have created a AMI if not in check mode.") + else: + module.exit_json(changed=False, msg="Error registering image: AMI name is already in use by another AMI") -def get_image_by_id(module, connection, image_id): - try: + @staticmethod + def wait(connection, wait_timeout, image_id): + if not wait_timeout: + return + + delay = 15 + max_attempts = wait_timeout // delay + waiter = get_waiter(connection, "image_available") + waiter.wait(ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}) + + @staticmethod + def set_tags(connection, module, tags, image_id): + if not tags: + return + + image_info = get_image_by_id(connection, image_id) + add_ec2_tags(connection, module, image_id, module.params["tags"]) + if image_info and image_info.get("BlockDeviceMappings"): + for mapping in image_info.get("BlockDeviceMappings"): + # We can only tag Ebs volumes + if "Ebs" not in mapping: + continue + add_ec2_tags(connection, module, mapping.get("Ebs").get("SnapshotId"), tags) + + @staticmethod + def set_launch_permissions(connection, launch_permissions, image_id): + if not launch_permissions: + return + # remove any keys with value=None + launch_permissions = {k: v for k, v in launch_permissions.items() if v is not None} try: - images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id]) + params = {"Attribute": "LaunchPermission", "ImageId": image_id, "LaunchPermission": {"Add": []}} + for group_name in launch_permissions.get("group_names", []): + params["LaunchPermission"]["Add"].append(dict(Group=group_name)) + for user_id in launch_permissions.get("user_ids", []): + params["LaunchPermission"]["Add"].append(dict(UserId=str(user_id))) + for org_arn in launch_permissions.get("org_arns", []): + params["LaunchPermission"]["Add"].append(dict(OrganizationArn=org_arn)) + for org_unit_arn in launch_permissions.get("org_unit_arns", []): + params["LaunchPermission"]["Add"].append(dict(OrganizationalUnitArn=org_unit_arn)) + if params["LaunchPermission"]["Add"]: + connection.modify_image_attribute(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error retrieving image %s" % image_id) - images = images_response.get('Images') - no_images = len(images) - if no_images == 0: - return None - if no_images == 1: - result = images[0] - try: - result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission', - ImageId=image_id)['LaunchPermissions'] - result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes', - ImageId=image_id)['ProductCodes'] - except is_boto3_error_code('InvalidAMIID.Unavailable'): - pass - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id) - return result - module.fail_json(msg="Invalid number 
of instances (%s) found for image_id: %s." % (str(len(images)), image_id)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error retrieving image by image_id") + raise Ec2AmiFailure(f"Error setting launch permissions for image {image_id}", e) + @staticmethod + def create_or_register(connection, create_image_parameters): + create_from_instance = "InstanceId" in create_image_parameters + func = connection.create_image if create_from_instance else connection.register_image + return func -def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): - new_item = dict_object.get(attribute) - if new_item is not None: - if attribute_type is not None: - new_item = attribute_type(new_item) - if child_node is None: - dict_object[new_attribute] = new_item + @staticmethod + def build_block_device_mapping(device_mapping): + # Remove empty values injected by using options + block_device_mapping = [] + for device in device_mapping: + device = {k: v for k, v in device.items() if v is not None} + device["Ebs"] = {} + rename_item_if_exists(device, "delete_on_termination", "DeleteOnTermination", "Ebs") + rename_item_if_exists(device, "device_name", "DeviceName") + rename_item_if_exists(device, "encrypted", "Encrypted", "Ebs") + rename_item_if_exists(device, "iops", "Iops", "Ebs") + rename_item_if_exists(device, "no_device", "NoDevice") + rename_item_if_exists(device, "size", "VolumeSize", "Ebs", attribute_type=int) + rename_item_if_exists(device, "snapshot_id", "SnapshotId", "Ebs") + rename_item_if_exists(device, "virtual_name", "VirtualName") + rename_item_if_exists(device, "volume_size", "VolumeSize", "Ebs", attribute_type=int) + rename_item_if_exists(device, "volume_type", "VolumeType", "Ebs") + + # The NoDevice parameter in Boto3 is a string. 
Empty string omits the device from block device mapping + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image + if "NoDevice" in device: + if device["NoDevice"] is True: + device["NoDevice"] = "" + else: + del device["NoDevice"] + block_device_mapping.append(device) + return block_device_mapping + + @staticmethod + def build_create_image_parameters(**kwargs): + architecture = kwargs.get("architecture") + billing_products = kwargs.get("billing_products") + boot_mode = kwargs.get("boot_mode") + description = kwargs.get("description") + device_mapping = kwargs.get("device_mapping") or [] + enhanced_networking = kwargs.get("enhanced_networking") + image_location = kwargs.get("image_location") + instance_id = kwargs.get("instance_id") + kernel_id = kwargs.get("kernel_id") + name = kwargs.get("name") + no_reboot = kwargs.get("no_reboot") + ramdisk_id = kwargs.get("ramdisk_id") + root_device_name = kwargs.get("root_device_name") + sriov_net_support = kwargs.get("sriov_net_support") + tags = kwargs.get("tags") + tpm_support = kwargs.get("tpm_support") + uefi_data = kwargs.get("uefi_data") + virtualization_type = kwargs.get("virtualization_type") + + params = { + "Name": name, + "Description": description, + "BlockDeviceMappings": CreateImage.build_block_device_mapping(device_mapping), + } + + # Remove empty values injected by using options + if instance_id: + params.update( + { + "InstanceId": instance_id, + "NoReboot": no_reboot, + "TagSpecifications": boto3_tag_specifications(tags, types=["image", "snapshot"]), + } + ) else: - dict_object[child_node][new_attribute] = new_item - dict_object.pop(attribute) - return dict_object + params.update( + { + "Architecture": architecture, + "BillingProducts": billing_products, + "BootMode": boot_mode, + "EnaSupport": enhanced_networking, + "ImageLocation": image_location, + "KernelId": kernel_id, + "RamdiskId": ramdisk_id, + "RootDeviceName": root_device_name, + "SriovNetSupport": sriov_net_support, + "TpmSupport": tpm_support, + "UefiData": uefi_data, + "VirtualizationType": virtualization_type, + } + ) + + return {k: v for k, v in params.items() if v} + + @classmethod + def do(cls, module, connection, _image_id): + """Entry point to create image""" + create_image_parameters = cls.build_create_image_parameters(**module.params) + + func = cls.create_or_register(connection, create_image_parameters) + try: + image = func(aws_retry=True, **create_image_parameters) + image_id = image.get("ImageId") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error registering image", e) + + cls.wait(connection, module.params.get("wait") and module.params.get("wait_timeout"), image_id) + + if "TagSpecifications" not in create_image_parameters: + CreateImage.set_tags(connection, module, module.params.get("tags"), image_id) + + cls.set_launch_permissions(connection, module.params.get("launch_permissions"), image_id) + + module.exit_json( + msg="AMI creation operation complete.", changed=True, **get_ami_info(get_image_by_id(connection, image_id)) + ) def main(): - mapping_options = dict( - device_name=dict(type='str', required=True), - virtual_name=dict(type='str'), - no_device=dict(type='bool'), - volume_type=dict(type='str'), - delete_on_termination=dict(type='bool'), - snapshot_id=dict(type='str'), - iops=dict(type='int'), - encrypted=dict(type='bool'), - volume_size=dict(type='int', aliases=['size']), - ) + mapping_options = { + "delete_on_termination": 
{"type": "bool"}, + "device_name": {"type": "str", "required": True}, + "encrypted": {"type": "bool"}, + "iops": {"type": "int"}, + "no_device": {"type": "bool"}, + "snapshot_id": {"type": "str"}, + "virtual_name": {"type": "str"}, + "volume_size": {"type": "int", "aliases": ["size"]}, + "volume_type": {"type": "str"}, + } argument_spec = dict( - instance_id=dict(), - image_id=dict(), - architecture=dict(default='x86_64'), - kernel_id=dict(), - virtualization_type=dict(default='hvm'), - root_device_name=dict(), - delete_snapshot=dict(default=False, type='bool'), - name=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(default=1200, type='int'), - description=dict(default=''), - no_reboot=dict(default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent']), - device_mapping=dict(type='list', elements='dict', options=mapping_options), - launch_permissions=dict(type='dict'), - image_location=dict(), - enhanced_networking=dict(type='bool'), - billing_products=dict(type='list', elements='str',), - ramdisk_id=dict(), - sriov_net_support=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - boot_mode=dict(type='str', choices=['legacy-bios', 'uefi']), - tpm_support=dict(type='str'), - uefi_data=dict(type='str'), + architecture={"default": "x86_64"}, + billing_products={"type": "list", "elements": "str"}, + boot_mode={"type": "str", "choices": ["legacy-bios", "uefi"]}, + delete_snapshot={"default": False, "type": "bool"}, + description={"default": ""}, + device_mapping={"type": "list", "elements": "dict", "options": mapping_options}, + enhanced_networking={"type": "bool"}, + image_id={}, + image_location={}, + instance_id={}, + kernel_id={}, + launch_permissions=dict( + type="dict", + options=dict( + user_ids=dict(type="list", elements="str"), + group_names=dict(type="list", elements="str"), + org_arns=dict(type="list", elements="str"), + org_unit_arns=dict(type="list", elements="str"), + ), + ), + name={}, + no_reboot={"default": False, "type": "bool"}, + purge_tags={"type": "bool", "default": True}, + ramdisk_id={}, + root_device_name={}, + sriov_net_support={}, + state={"default": "present", "choices": ["present", "absent"]}, + tags={"type": "dict", "aliases": ["resource_tags"]}, + tpm_support={"type": "str"}, + uefi_data={"type": "str"}, + virtualization_type={"default": "hvm"}, + wait={"type": "bool", "default": False}, + wait_timeout={"default": 1200, "type": "int"}, ) module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[ - ['state', 'absent', ['image_id']], + ["state", "absent", ["image_id"]], ], supports_check_mode=True, ) - # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by - # the required_if for state=absent, so check manually instead - if not any([module.params['image_id'], module.params['name']]): - module.fail_json(msg="one of the following is required: name, image_id") + validate_params(module, **module.params) - if any([module.params['tpm_support'], module.params['uefi_data']]): - module.require_botocore_at_least('1.26.0', reason='required for ec2.register_image with tpm_support or uefi_data') + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + CHECK_MODE_TRUE = True + CHECK_MODE_FALSE = False + HAS_IMAGE_ID_TRUE = True + HAS_IMAGE_ID_FALSE = False - if module.params.get('state') == 'absent': - 
deregister_image(module, connection) - elif module.params.get('state') == 'present': - if module.params.get('image_id'): - update_image(module, connection, module.params.get('image_id')) - if not module.params.get('instance_id') and not module.params.get('device_mapping'): - module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.") - create_image(module, connection) + func_mapping = { + CHECK_MODE_TRUE: { + HAS_IMAGE_ID_TRUE: {"absent": DeregisterImage.do_check_mode, "present": UpdateImage.do}, + HAS_IMAGE_ID_FALSE: {"present": CreateImage.do_check_mode}, + }, + CHECK_MODE_FALSE: { + HAS_IMAGE_ID_TRUE: {"absent": DeregisterImage.do, "present": UpdateImage.do}, + HAS_IMAGE_ID_FALSE: {"present": CreateImage.do}, + }, + } + func = func_mapping[module.check_mode][bool(module.params.get("image_id"))][module.params["state"]] + try: + func(module, connection, module.params.get("image_id")) + except Ec2AmiFailure as e: + if e.original_e: + module.fail_json_aws(e.original_e, e.message) + else: + module.fail_json(e.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py index 3d67e89de..2929a0292 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami_info version_added: 1.0.0 @@ -51,12 +49,12 @@ options: type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: gather information about an AMI using ami-id @@ -78,9 +76,9 @@ EXAMPLES = ''' owners: 099720109477 filters: name: "ubuntu/images/ubuntu-zesty-17.04-*" -''' +""" -RETURN = ''' +RETURN = r""" images: description: A list of images. 
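The main() refactor above replaces the old if/elif chain with a three-level dispatch table keyed on (check_mode, whether image_id was supplied, state). A minimal standalone sketch of the lookup, with the handlers replaced by strings for illustration:

func_mapping = {
    True: {
        True: {"absent": "DeregisterImage.do_check_mode", "present": "UpdateImage.do"},
        False: {"present": "CreateImage.do_check_mode"},
    },
    False: {
        True: {"absent": "DeregisterImage.do", "present": "UpdateImage.do"},
        False: {"present": "CreateImage.do"},
    },
}
check_mode, has_image_id, state = False, True, "absent"
print(func_mapping[check_mode][has_image_id][state])  # DeregisterImage.do
# state=absent without image_id would be a KeyError here, but the module's
# required_if rule (["state", "absent", ["image_id"]]) rejects that input first.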
returned: always
@@ -199,29 +197,35 @@ images:
   returned: always
   type: str
   sample: hvm
-'''
+"""

 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # Handled by AnsibleAWSModule

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

-def list_ec2_images(ec2_client, module):
+class AmiInfoFailure(Exception):
+    def __init__(self, original_e, user_message):
+        self.original_e = original_e
+        self.user_message = user_message
+        super().__init__(user_message)

-    image_ids = module.params.get("image_ids")
-    owners = module.params.get("owners")
-    executable_users = module.params.get("executable_users")
-    filters = module.params.get("filters")
-    owner_param = []
+
+def build_request_args(executable_users, filters, image_ids, owners):
+    request_args = {
+        "ExecutableUsers": [str(user) for user in executable_users],
+        "ImageIds": [str(image_id) for image_id in image_ids],
+    }

     # describe_images is *very* slow if you pass the `Owners`
     # param (unless it's self), for some reason.
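Because of that slowness, build_request_args (continued in the next hunk) folds numeric and alias owners into filters and only passes "self" through the Owners parameter. A standalone sketch of that mapping, with assumed example values:

def map_owners(owners):
    request_args, filters = {}, {}
    for owner in owners:
        if owner.isdigit():
            filters.setdefault("owner-id", []).append(owner)
        elif owner == "self":
            request_args["Owners"] = [owner]  # "self" is not a valid owner-alias filter
        else:
            filters.setdefault("owner-alias", []).append(owner)
    return request_args, filters

print(map_owners(["099720109477", "self", "amazon"]))
# ({'Owners': ['self']}, {'owner-id': ['099720109477'], 'owner-alias': ['amazon']})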
@@ -230,58 +234,88 @@ def list_ec2_images(ec2_client, module): # Implementation based on aioue's suggestion in #24886 for owner in owners: if owner.isdigit(): - if 'owner-id' not in filters: - filters['owner-id'] = list() - filters['owner-id'].append(owner) - elif owner == 'self': + if "owner-id" not in filters: + filters["owner-id"] = list() + filters["owner-id"].append(owner) + elif owner == "self": # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) - owner_param.append(owner) + request_args["Owners"] = [str(owner)] else: - if 'owner-alias' not in filters: - filters['owner-alias'] = list() - filters['owner-alias'].append(owner) + if "owner-alias" not in filters: + filters["owner-alias"] = list() + filters["owner-alias"].append(owner) + + request_args["Filters"] = ansible_dict_to_boto3_filter_list(filters) + + request_args = {k: v for k, v in request_args.items() if v} + + return request_args + + +def get_images(ec2_client, request_args): + try: + images = ec2_client.describe_images(aws_retry=True, **request_args) + except (ClientError, BotoCoreError) as err: + raise AmiInfoFailure(err, "error describing images") + return images - filters = ansible_dict_to_boto3_filter_list(filters) +def get_image_attribute(ec2_client, image_id): try: - images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param, - ExecutableUsers=executable_users) - images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + launch_permissions = ec2_client.describe_image_attribute( + aws_retry=True, Attribute="launchPermission", ImageId=image_id + ) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="error describing images") + raise AmiInfoFailure(err, "error describing image attribute") + return launch_permissions + + +def list_ec2_images(ec2_client, module, request_args): + images = get_images(ec2_client, request_args)["Images"] + images = [camel_dict_to_snake_dict(image) for image in images] + for image in images: try: - image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + image_id = image["image_id"] + image["tags"] = boto3_tag_list_to_ansible_dict(image.get("tags", [])) if module.params.get("describe_image_attributes"): - launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission', - ImageId=image['image_id'])['LaunchPermissions'] - image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] - except is_boto3_error_code('AuthFailure'): + launch_permissions = get_image_attribute(ec2_client, image_id).get("LaunchPermissions", []) + image["launch_permissions"] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except is_boto3_error_code("AuthFailure"): # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures pass except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, 'Failed to describe AMI') + raise AmiInfoFailure(err, "Failed to describe AMI") - images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist - module.exit_json(images=images) + images.sort(key=lambda e: e.get("creation_date", "")) # it may be possible that creation_date does not always exist + return images -def main(): +def main(): argument_spec = dict( - image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']), - 
filters=dict(default={}, type='dict'), - owners=dict(default=[], type='list', elements='str', aliases=['owner']), - executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']), - describe_image_attributes=dict(default=False, type='bool') + describe_image_attributes=dict(default=False, type="bool"), + executable_users=dict(default=[], type="list", elements="str", aliases=["executable_user"]), + filters=dict(default={}, type="dict"), + image_ids=dict(default=[], type="list", elements="str", aliases=["image_id"]), + owners=dict(default=[], type="list", elements="str", aliases=["owner"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2_client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + executable_users=module.params["executable_users"], + filters=module.params["filters"], + image_ids=module.params["image_ids"], + owners=module.params["owners"], + ) - list_ec2_images(ec2_client, module) + images = list_ec2_images(ec2_client, module, request_args) + + module.exit_json(images=images) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py index 4c3094b98..38bf32c87 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py @@ -4,11 +4,7 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eip version_added: 5.0.0 @@ -20,10 +16,11 @@ description: options: device_id: description: - - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. - - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01. + - The id of the device for the EIP. + - Can be an EC2 Instance id or Elastic Network Interface (ENI) id. + - When specifying an ENI id, I(in_vpc) must be C(true) + - The C(instance_id) alias was removed in release 6.0.0. required: false - aliases: [ instance_id ] type: str public_ip: description: @@ -80,8 +77,8 @@ options: only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -96,9 +93,9 @@ notes: It returns an overall status based on any changes occurring. It also returns individual changed statuses for disassociation and release. - Support for I(tags) and I(purge_tags) was added in release 2.1.0. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
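The in_vpc requirement for ENI ids described above is enforced by the check_is_instance helper introduced further down in this diff; a self-contained demonstration (the ids are illustrative):

class EipError(Exception):
    pass


def check_is_instance(device_id, in_vpc):
    # Copied from the helper added later in this diff.
    if not device_id:
        return False
    if device_id.startswith("i-"):
        return True
    if device_id.startswith("eni-") and not in_vpc:
        raise EipError("If you are specifying an ENI, in_vpc must be true")
    return False


print(check_is_instance("i-0123456789abcdef0", in_vpc=False))   # True: treat as instance
print(check_is_instance("eni-0123456789abcdef0", in_vpc=True))  # False: treat as ENI
check_is_instance("eni-0123456789abcdef0", in_vpc=False)        # raises EipError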
- name: associate an elastic IP with an instance @@ -204,9 +201,9 @@ EXAMPLES = ''' tag_name: reserved_for tag_value: "{{ inventory_hostname }}" public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 -''' +""" -RETURN = ''' +RETURN = r""" allocation_id: description: allocation_id of the elastic ip returned: on success @@ -217,23 +214,30 @@ public_ip: returned: on success type: str sample: 52.88.159.209 -''' +""" try: import botocore.exceptions except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +class EipError(Exception): + pass -def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): + +def associate_ip_and_device( + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True +): if address_is_associated_with_device(ec2, module, address, device_id, is_instance): - return {'changed': False} + return {"changed": False} # If we're in check mode, nothing else to do if not check_mode: @@ -244,60 +248,56 @@ def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, AllowReassociation=allow_reassociation, ) if private_ip_address: - params['PrivateIpAddress'] = private_ip_address - if address['Domain'] == 'vpc': - params['AllocationId'] = address['AllocationId'] + params["PrivateIpAddress"] = private_ip_address + if address["Domain"] == "vpc": + params["AllocationId"] = address["AllocationId"] else: - params['PublicIp'] = address['PublicIp'] + params["PublicIp"] = address["PublicIp"] res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with instance '{device_id}'" module.fail_json_aws(e, msg=msg) else: params = dict( NetworkInterfaceId=device_id, - AllocationId=address['AllocationId'], + AllocationId=address["AllocationId"], AllowReassociation=allow_reassociation, ) if private_ip_address: - params['PrivateIpAddress'] = private_ip_address + params["PrivateIpAddress"] = private_ip_address try: res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with network interface '{device_id}'" module.fail_json_aws(e, msg=msg) if not res: - module.fail_json_aws(e, msg='Association failed.') + module.fail_json(msg="Association failed.") 
-    return {'changed': True}
+    return {"changed": True}


 def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
     if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
-        return {'changed': False}
+        return {"changed": False}

     # If we're in check mode, nothing else to do
     if not check_mode:
         try:
-            if address['Domain'] == 'vpc':
-                res = ec2.disassociate_address(
-                    AssociationId=address['AssociationId'], aws_retry=True
-                )
+            if address["Domain"] == "vpc":
+                ec2.disassociate_address(AssociationId=address["AssociationId"], aws_retry=True)
             else:
-                res = ec2.disassociate_address(
-                    PublicIp=address['PublicIp'], aws_retry=True
-                )
+                ec2.disassociate_address(PublicIp=address["PublicIp"], aws_retry=True)
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             module.fail_json_aws(e, msg="Disassociation of Elastic IP failed")

-    return {'changed': True}
+    return {"changed": True}


 @AWSRetry.jittered_backoff()
 def find_address(ec2, module, public_ip, device_id, is_instance=True):
-    """ Find an existing Elastic IP address """
+    """Find an existing Elastic IP address"""
     filters = []
     kwargs = {}

@@ -305,9 +305,9 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True):
         kwargs["PublicIps"] = [public_ip]
     elif device_id:
         if is_instance:
-            filters.append({"Name": 'instance-id', "Values": [device_id]})
+            filters.append({"Name": "instance-id", "Values": [device_id]})
         else:
-            filters.append({'Name': 'network-interface-id', "Values": [device_id]})
+            filters.append({"Name": "network-interface-id", "Values": [device_id]})

     if len(filters) > 0:
         kwargs["Filters"] = filters
@@ -316,9 +316,9 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True):

     try:
         addresses = ec2.describe_addresses(**kwargs)
-    except is_boto3_error_code('InvalidAddress.NotFound') as e:
+    except is_boto3_error_code("InvalidAddress.NotFound") as e:
         # If we're releasing and we can't find it, it's already gone...
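For readers unfamiliar with the is_boto3_error_code helper used above: it builds an exception class that roughly matches only ClientErrors carrying that error code. A plain-botocore sketch of the same lookup (assumptions: `ec2` is a boto3 EC2 client and kwargs are the describe_addresses arguments built earlier; the module additionally exits early when state=absent, as the next lines show):

import botocore.exceptions

def describe_addresses_tolerating_missing(ec2, **kwargs):
    try:
        return ec2.describe_addresses(**kwargs)
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] != "InvalidAddress.NotFound":
            raise
        return {"Addresses": []}  # the address is already gone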
- if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") @@ -326,13 +326,12 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): if len(addresses) == 1: return addresses[0] elif len(addresses) > 1: - msg = "Found more than one address using args {0}".format(kwargs) - msg += "Addresses found: {0}".format(addresses) + msg = f"Found more than one address using args {kwargs} Addresses found: {addresses}" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): - """ Check if the elastic IP is currently associated with the device """ + """Check if the elastic IP is currently associated with the device""" address = find_address(ec2, module, address["PublicIp"], device_id, is_instance) if address: if is_instance: @@ -344,17 +343,26 @@ def address_is_associated_with_device(ec2, module, address, device_id, is_instan return False -def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None): - """ Allocate a new elastic IP address (when needed) and return it """ +def allocate_address( + ec2, + module, + domain, + reuse_existing_ip_allowed, + check_mode, + tags, + search_tags=None, + public_ipv4_pool=None, +): + """Allocate a new elastic IP address (when needed) and return it""" if not domain: - domain = 'standard' + domain = "standard" if reuse_existing_ip_allowed: filters = [] - filters.append({'Name': 'domain', "Values": [domain]}) + filters.append({"Name": "domain", "Values": [domain]}) - if tag_dict is not None: - filters += ansible_dict_to_boto3_filter_list(tag_dict) + if search_tags is not None: + filters += ansible_dict_to_boto3_filter_list(search_tags) try: all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True) @@ -363,60 +371,72 @@ def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, all_addresses = all_addresses["Addresses"] - if domain == 'vpc': - unassociated_addresses = [a for a in all_addresses - if not a.get('AssociationId', None)] + if domain == "vpc": + unassociated_addresses = [a for a in all_addresses if not a.get("AssociationId", None)] else: - unassociated_addresses = [a for a in all_addresses - if not a['InstanceId']] + unassociated_addresses = [a for a in all_addresses if not a["InstanceId"]] if unassociated_addresses: return unassociated_addresses[0], False if public_ipv4_pool: - return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True + return ( + allocate_address_from_pool( + ec2, + module, + domain, + check_mode, + public_ipv4_pool, + tags, + ), + True, + ) + + params = {"Domain": domain} + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="elastic-ip") try: if check_mode: return None, True - result = ec2.allocate_address(Domain=domain, aws_retry=True), True + result = ec2.allocate_address(aws_retry=True, **params), True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") return result def release_address(ec2, module, address, check_mode): - """ Release a previously allocated elastic IP address """ + """Release a previously allocated elastic IP address""" # If we're in check mode, nothing else to do if not 
check_mode: try: - result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True) + ec2.release_address(AllocationId=address["AllocationId"], aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't release Elastic IP address") - return {'changed': True} + return {"changed": True} @AWSRetry.jittered_backoff() def describe_eni_with_backoff(ec2, module, device_id): try: return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id]) - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e: + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound") as e: module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") def find_device(ec2, module, device_id, is_instance=True): - """ Attempt to find the EC2 instance and return it """ + """Attempt to find the EC2 instance and return it""" if is_instance: try: - paginator = ec2.get_paginator('describe_instances') - reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]')) + paginator = ec2.get_paginator("describe_instances") + reservations = list(paginator.paginate(InstanceIds=[device_id]).search("Reservations[]")) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't get list of instances") if len(reservations) == 1: - instances = reservations[0]['Instances'] + instances = reservations[0]["Instances"] if len(instances) == 1: return instances[0] else: @@ -428,76 +448,98 @@ def find_device(ec2, module, device_id, is_instance=True): return interfaces[0] -def ensure_present(ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True): +def ensure_present( + ec2, + module, + domain, + address, + private_ip_address, + device_id, + reuse_existing_ip_allowed, + allow_reassociation, + check_mode, + tags, + is_instance=True, +): changed = False # Return the EIP object since we've been given a public IP if not address: if check_mode: - return {'changed': True} + return {"changed": True} - address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode) + address, changed = allocate_address( + ec2, + module, + domain, + reuse_existing_ip_allowed, + check_mode, + tags, + ) if device_id: # Allocate an IP for instance since no public_ip was provided if is_instance: instance = find_device(ec2, module, device_id) if reuse_existing_ip_allowed: - if instance['VpcId'] and len(instance['VpcId']) > 0 and domain is None: + if instance["VpcId"] and len(instance["VpcId"]) > 0 and domain is None: msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) # Associate address object (provided or allocated) with instance assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode ) else: instance = find_device(ec2, module, device_id, is_instance=False) # Associate address object (provided or allocated) with instance assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode, is_instance=False + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=False ) - changed = changed 
or assoc_result['changed'] + changed = changed or assoc_result["changed"] - return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']} + return {"changed": changed, "public_ip": address["PublicIp"], "allocation_id": address["AllocationId"]} def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True): if not address: - return {'changed': False} + return {"changed": False} # disassociating address from instance if device_id: if is_instance: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode - ) + return disassociate_ip_and_device(ec2, module, address, device_id, check_mode) else: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode, is_instance=False - ) + return disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=False) # releasing address else: return release_address(ec2, module, address, check_mode) -def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): +def allocate_address_from_pool( + ec2, + module, + domain, + check_mode, + public_ipv4_pool, + tags, +): # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address - """ Overrides botocore's allocate_address function to support BYOIP """ + """Overrides botocore's allocate_address function to support BYOIP""" if check_mode: return None params = {} if domain is not None: - params['Domain'] = domain + params["Domain"] = domain if public_ipv4_pool is not None: - params['PublicIpv4Pool'] = public_ipv4_pool + params["PublicIpv4Pool"] = public_ipv4_pool + + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="elastic-ip") try: result = ec2.allocate_address(aws_retry=True, **params) @@ -508,82 +550,82 @@ def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool def generate_tag_dict(module, tag_name, tag_value): # type: (AnsibleAWSModule, str, str) -> Optional[Dict] - """ Generates a dictionary to be passed as a filter to Amazon """ + """Generates a dictionary to be passed as a filter to Amazon""" if tag_name and not tag_value: - if tag_name.startswith('tag:'): - tag_name = tag_name.strip('tag:') - return {'tag-key': tag_name} + if tag_name.startswith("tag:"): + tag_name = tag_name.strip("tag:") + return {"tag-key": tag_name} elif tag_name and tag_value: - if not tag_name.startswith('tag:'): - tag_name = 'tag:' + tag_name + if not tag_name.startswith("tag:"): + tag_name = "tag:" + tag_name return {tag_name: tag_value} elif tag_value and not tag_name: module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')") +def check_is_instance(device_id, in_vpc): + if not device_id: + return False + if device_id.startswith("i-"): + return True + + if device_id.startswith("eni-") and not in_vpc: + raise EipError("If you are specifying an ENI, in_vpc must be true") + + return False + + def main(): argument_spec = dict( - device_id=dict(required=False, aliases=['instance_id'], - deprecated_aliases=[dict(name='instance_id', - date='2022-12-01', - collection_name='amazon.aws')]), - public_ip=dict(required=False, aliases=['ip']), - state=dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc=dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed=dict(required=False, type='bool', - default=False), - release_on_disassociation=dict(required=False, type='bool', default=False), - allow_reassociation=dict(type='bool', default=False), + 
device_id=dict(required=False), + public_ip=dict(required=False, aliases=["ip"]), + state=dict(required=False, default="present", choices=["present", "absent"]), + in_vpc=dict(required=False, type="bool", default=False), + reuse_existing_ip_allowed=dict(required=False, type="bool", default=False), + release_on_disassociation=dict(required=False, type="bool", default=False), + allow_reassociation=dict(type="bool", default=False), private_ip_address=dict(), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), tag_name=dict(), tag_value=dict(), - public_ipv4_pool=dict() + public_ipv4_pool=dict(), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_by={ - 'private_ip_address': ['device_id'], + "private_ip_address": ["device_id"], }, ) - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - - device_id = module.params.get('device_id') - instance_id = module.params.get('instance_id') - public_ip = module.params.get('public_ip') - private_ip_address = module.params.get('private_ip_address') - state = module.params.get('state') - in_vpc = module.params.get('in_vpc') - domain = 'vpc' if in_vpc else None - reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - release_on_disassociation = module.params.get('release_on_disassociation') - allow_reassociation = module.params.get('allow_reassociation') - tag_name = module.params.get('tag_name') - tag_value = module.params.get('tag_value') - public_ipv4_pool = module.params.get('public_ipv4_pool') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if instance_id: - is_instance = True - device_id = instance_id - else: - if device_id and device_id.startswith('i-'): - is_instance = True - elif device_id: - if device_id.startswith('eni-') and not in_vpc: - module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") - is_instance = False + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + device_id = module.params.get("device_id") + public_ip = module.params.get("public_ip") + private_ip_address = module.params.get("private_ip_address") + state = module.params.get("state") + in_vpc = module.params.get("in_vpc") + domain = "vpc" if in_vpc else None + reuse_existing_ip_allowed = module.params.get("reuse_existing_ip_allowed") + release_on_disassociation = module.params.get("release_on_disassociation") + allow_reassociation = module.params.get("allow_reassociation") + tag_name = module.params.get("tag_name") + tag_value = module.params.get("tag_value") + public_ipv4_pool = module.params.get("public_ipv4_pool") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + try: + is_instance = check_is_instance(device_id, in_vpc) + except EipError as e: + module.fail_json(msg=str(e)) # Tags for *searching* for an EIP. 
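That comment refers to the filter dictionary produced by generate_tag_dict, defined earlier in this file. A simplified standalone sketch of its behaviour (error handling omitted; note that the original's tag_name.strip('tag:') strips a character set rather than a prefix, so the sketch slices the prefix off instead):

def generate_search_tags(tag_name, tag_value):
    if tag_name and not tag_value:
        if tag_name.startswith("tag:"):
            tag_name = tag_name[len("tag:"):]
        return {"tag-key": tag_name}
    if tag_name and tag_value:
        if not tag_name.startswith("tag:"):
            tag_name = "tag:" + tag_name
        return {tag_name: tag_value}
    return None

print(generate_search_tags("reserved_for", None))      # {'tag-key': 'reserved_for'}
print(generate_search_tags("reserved_for", "web-01"))  # {'tag:reserved_for': 'web-01'}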
- tag_dict = generate_tag_dict(module, tag_name, tag_value) + search_tags = generate_tag_dict(module, tag_name, tag_value) try: if device_id: @@ -591,70 +633,78 @@ def main(): else: address = find_address(ec2, module, public_ip, None) - if state == 'present': + if state == "present": if device_id: result = ensure_present( - ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, - module.check_mode, is_instance=is_instance + ec2, + module, + domain, + address, + private_ip_address, + device_id, + reuse_existing_ip_allowed, + allow_reassociation, + module.check_mode, + tags, + is_instance=is_instance, ) - if 'allocation_id' not in result: + if "allocation_id" not in result: # Don't check tags on check_mode here - no EIP to pass through module.exit_json(**result) else: if address: result = { - 'changed': False, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] + "changed": False, + "public_ip": address["PublicIp"], + "allocation_id": address["AllocationId"], } else: address, changed = allocate_address( - ec2, module, domain, reuse_existing_ip_allowed, - module.check_mode, tag_dict, public_ipv4_pool + ec2, + module, + domain, + reuse_existing_ip_allowed, + module.check_mode, + tags, + search_tags, + public_ipv4_pool, ) if address: result = { - 'changed': changed, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] + "changed": changed, + "public_ip": address["PublicIp"], + "allocation_id": address["AllocationId"], } else: # Don't check tags on check_mode here - no EIP to pass through - result = { - 'changed': changed - } + result = {"changed": changed} module.exit_json(**result) - result['changed'] |= ensure_ec2_tags( - ec2, module, result['allocation_id'], - resource_type='elastic-ip', tags=tags, purge_tags=purge_tags) + result["changed"] |= ensure_ec2_tags( + ec2, module, result["allocation_id"], resource_type="elastic-ip", tags=tags, purge_tags=purge_tags + ) else: if device_id: disassociated = ensure_absent( ec2, module, address, device_id, module.check_mode, is_instance=is_instance ) - if release_on_disassociation and disassociated['changed']: + if release_on_disassociation and disassociated["changed"]: released = release_address(ec2, module, address, module.check_mode) result = { - 'changed': True, - 'disassociated': disassociated['changed'], - 'released': released['changed'] + "changed": True, + "disassociated": disassociated["changed"], + "released": released["changed"], } else: result = { - 'changed': disassociated['changed'], - 'disassociated': disassociated['changed'], - 'released': False + "changed": disassociated["changed"], + "disassociated": disassociated["changed"], + "released": False, } else: released = release_address(ec2, module, address, module.check_mode) - result = { - 'changed': released['changed'], - 'disassociated': False, - 'released': released['changed'] - } + result = {"changed": released["changed"], "disassociated": False, "released": released["changed"]} except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(str(e)) @@ -662,5 +712,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py index c94f164f5..c00dc515c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py +++ 
b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eip_info version_added: 5.0.0 @@ -26,13 +24,12 @@ options: default: {} type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. @@ -43,7 +40,7 @@ EXAMPLES = r''' - name: List all EIP addresses for a VM. amazon.aws.ec2_eip_info: filters: - instance-id: i-123456789 + instance-id: i-123456789 register: my_vm_eips - ansible.builtin.debug: @@ -52,9 +49,9 @@ EXAMPLES = r''' - name: List all EIP addresses for several VMs. amazon.aws.ec2_eip_info: filters: - instance-id: - - i-123456789 - - i-987654321 + instance-id: + - i-123456789 + - i-987654321 register: my_vms_eips - name: List all EIP addresses using the 'Name' tag as a filter. @@ -74,11 +71,10 @@ EXAMPLES = r''' - ansible.builtin.set_fact: eip_alloc: my_vms_eips.addresses[0].allocation_id my_pub_ip: my_vms_eips.addresses[0].public_ip - -''' +""" -RETURN = ''' +RETURN = r""" addresses: description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP. returned: on success @@ -96,52 +92,42 @@ addresses: "Name": "test-vm-54.81.104.1" } }] - -''' +""" try: - from botocore.exceptions import (BotoCoreError, ClientError) + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_eips_details(module): - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) filters = module.params.get("filters") try: - response = connection.describe_addresses( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(filters) - ) + response = connection.describe_addresses(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(filters)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Error retrieving EIPs") + module.fail_json_aws(e, msg="Error retrieving EIPs") - addresses = camel_dict_to_snake_dict(response)['addresses'] + addresses = 
camel_dict_to_snake_dict(response)["addresses"]

     for address in addresses:
-        if 'tags' in address:
-            address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
+        if "tags" in address:
+            address["tags"] = boto3_tag_list_to_ansible_dict(address["tags"])

     return addresses


 def main():
-    module = AnsibleAWSModule(
-        argument_spec=dict(
-            filters=dict(type='dict', default={})
-        ),
-        supports_check_mode=True
-    )
+    module = AnsibleAWSModule(argument_spec=dict(filters=dict(type="dict", default={})), supports_check_mode=True)

     module.exit_json(changed=False, addresses=get_eips_details(module))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
index 46c90d542..bf8e76a2b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-

+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: ec2_eni
 version_added: 1.0.0
@@ -116,17 +114,17 @@ options:
     required: false
     type: str
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
 notes:
   - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
     or a combination of I(instance_id) and I(device_id). Any of these options will let you specify a particular ENI.
   - Support for I(tags) and I(purge_tags) was added in release 1.3.0.
-'''
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.

 # Create an ENI.
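The three identification paths listed in the notes above translate into describe_network_interfaces filters, built by uniquely_find_eni near the end of this diff. Illustrative filter sets (all ids and addresses are made up):

by_eni_id = [{"Name": "network-interface-id", "Values": ["eni-0123456789abcdef0"]}]
by_ip_and_subnet = [
    {"Name": "private-ip-address", "Values": ["10.0.0.10"]},
    {"Name": "subnet-id", "Values": ["subnet-0123456789abcdef0"]},
]
# Either list can be passed as Filters=... to a boto3 EC2 client's
# describe_network_interfaces call.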
As no security group is defined, ENI will be created in default security group @@ -210,11 +208,10 @@ EXAMPLES = ''' - amazon.aws.ec2_eni: eni_id: "{{ eni.interface.id }}" delete_on_termination: true - -''' +""" -RETURN = ''' +RETURN = r""" interface: description: Network interface attributes returned: when state != absent @@ -274,8 +271,7 @@ interface: description: which vpc this network interface is bound type: str sample: vpc-9a9a9da - -''' +""" import time from ipaddress import ip_address @@ -286,41 +282,41 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_eni_info(interface): - # Private addresses private_addresses = [] if "PrivateIpAddresses" in interface: for ip in interface["PrivateIpAddresses"]: - private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]}) + private_addresses.append({"private_ip_address": ip["PrivateIpAddress"], "primary_address": ip["Primary"]}) groups = {} if "Groups" in interface: for group in interface["Groups"]: groups[group["GroupId"]] = group["GroupName"] - interface_info = {'id': interface.get("NetworkInterfaceId"), - 'subnet_id': interface.get("SubnetId"), - 'vpc_id': interface.get("VpcId"), - 'description': interface.get("Description"), - 'owner_id': interface.get("OwnerId"), - 'status': interface.get("Status"), - 'mac_address': interface.get("MacAddress"), - 'private_ip_address': interface.get("PrivateIpAddress"), - 'source_dest_check': interface.get("SourceDestCheck"), - 'groups': groups, - 'private_ip_addresses': private_addresses - } + interface_info = { + "id": interface.get("NetworkInterfaceId"), + "subnet_id": interface.get("SubnetId"), + "vpc_id": interface.get("VpcId"), + "description": interface.get("Description"), + "owner_id": interface.get("OwnerId"), + "status": interface.get("Status"), + "mac_address": interface.get("MacAddress"), + "private_ip_address": interface.get("PrivateIpAddress"), + "source_dest_check": interface.get("SourceDestCheck"), + "groups": groups, + "private_ip_addresses": private_addresses, + } if "TagSet" in interface: tags = boto3_tag_list_to_ansible_dict(interface["TagSet"]) @@ -329,13 +325,13 @@ def get_eni_info(interface): interface_info["tags"] = tags if "Attachment" in interface: - interface_info['attachment'] = { - 'attachment_id': interface["Attachment"].get("AttachmentId"), - 'instance_id': interface["Attachment"].get("InstanceId"), - 'device_index': 
interface["Attachment"].get("DeviceIndex"), - 'status': interface["Attachment"].get("Status"), - 'attach_time': interface["Attachment"].get("AttachTime"), - 'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"), + interface_info["attachment"] = { + "attachment_id": interface["Attachment"].get("AttachmentId"), + "instance_id": interface["Attachment"].get("InstanceId"), + "device_index": interface["Attachment"].get("DeviceIndex"), + "status": interface["Attachment"].get("Status"), + "attach_time": interface["Attachment"].get("AttachTime"), + "delete_on_termination": interface["Attachment"].get("DeleteOnTermination"), } return interface_info @@ -390,20 +386,16 @@ def wait_for(function_pointer, *args): def create_eni(connection, vpc_id, module): - instance_id = module.params.get("instance_id") attached = module.params.get("attached") - if instance_id == 'None': + if instance_id == "None": instance_id = None device_index = module.params.get("device_index") - subnet_id = module.params.get('subnet_id') - private_ip_address = module.params.get('private_ip_address') - description = module.params.get('description') + subnet_id = module.params.get("subnet_id") + private_ip_address = module.params.get("private_ip_address") + description = module.params.get("description") security_groups = get_ec2_security_group_ids_from_names( - module.params.get('security_groups'), - connection, - vpc_id=vpc_id, - boto3=True + module.params.get("security_groups"), connection, vpc_id=vpc_id, boto3=True ) secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") @@ -413,7 +405,7 @@ def create_eni(connection, vpc_id, module): name = module.params.get("name") # Make sure that the 'name' parameter sets the Name tag if name: - tags['Name'] = name + tags["Name"] = name try: args = {"SubnetId": subnet_id} @@ -424,14 +416,17 @@ def create_eni(connection, vpc_id, module): if len(security_groups) > 0: args["Groups"] = security_groups if tags: - args["TagSpecifications"] = boto3_tag_specifications(tags, types='network-interface') + args["TagSpecifications"] = boto3_tag_specifications(tags, types="network-interface") # check if provided private_ip_address is within the subnet's address range if private_ip_address: - cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])['Subnets'][0]['CidrBlock'] + cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])["Subnets"][0]["CidrBlock"] valid_private_ip = ip_address(private_ip_address) in ip_network(cidr_block) if not valid_private_ip: - module.fail_json(changed=False, msg="Error: cannot create ENI - Address does not fall within the subnet's address range.") + module.fail_json( + changed=False, + msg="Error: cannot create ENI - Address does not fall within the subnet's address range.", + ) if module.check_mode: module.exit_json(changed=True, msg="Would have created ENI if not in check mode.") @@ -439,7 +434,7 @@ def create_eni(connection, vpc_id, module): eni = eni_dict["NetworkInterface"] # Once we have an ID make sure we're always modifying the same object eni_id = eni["NetworkInterfaceId"] - get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_available").wait(NetworkInterfaceIds=[eni_id]) if attached and instance_id is not None: try: @@ -447,19 +442,19 @@ def create_eni(connection, vpc_id, module): aws_retry=True, InstanceId=instance_id, 
DeviceIndex=device_index, - NetworkInterfaceId=eni["NetworkInterfaceId"] + NetworkInterfaceId=eni["NetworkInterfaceId"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) raise - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) if secondary_private_ip_address_count is not None: try: connection.assign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni["NetworkInterfaceId"], - SecondaryPrivateIpAddressCount=secondary_private_ip_address_count + SecondaryPrivateIpAddressCount=secondary_private_ip_address_count, ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): @@ -469,8 +464,7 @@ def create_eni(connection, vpc_id, module): if secondary_private_ip_addresses is not None: try: connection.assign_private_ip_addresses( - NetworkInterfaceId=eni["NetworkInterfaceId"], - PrivateIpAddresses=secondary_private_ip_addresses + NetworkInterfaceId=eni["NetworkInterfaceId"], PrivateIpAddresses=secondary_private_ip_addresses ) wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): @@ -482,21 +476,17 @@ def create_eni(connection, vpc_id, module): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address) - ) + module.fail_json_aws(e, f"Failed to create eni {name} for {subnet_id} in {vpc_id} with {private_ip_address}") module.exit_json(changed=changed, interface=get_eni_info(eni)) def modify_eni(connection, module, eni): - instance_id = module.params.get("instance_id") attached = module.params.get("attached") device_index = module.params.get("device_index") - description = module.params.get('description') - security_groups = module.params.get('security_groups') + description = module.params.get("description") + security_groups = module.params.get("security_groups") source_dest_check = module.params.get("source_dest_check") delete_on_termination = module.params.get("delete_on_termination") secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") @@ -516,9 +506,7 @@ def modify_eni(connection, module, eni): if "Description" not in eni or eni["Description"] != description: if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - Description={'Value': description} + aws_retry=True, NetworkInterfaceId=eni_id, Description={"Value": description} ) changed = True if len(security_groups) > 0: @@ -526,18 +514,14 @@ def modify_eni(connection, module, eni): if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups): if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - Groups=groups + aws_retry=True, NetworkInterfaceId=eni_id, Groups=groups ) changed = True if source_dest_check is not None: if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check: if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - SourceDestCheck={'Value': source_dest_check} 
+ aws_retry=True, NetworkInterfaceId=eni_id, SourceDestCheck={"Value": source_dest_check} ) changed = True if delete_on_termination is not None and "Attachment" in eni: @@ -546,8 +530,10 @@ def modify_eni(connection, module, eni): connection.modify_network_interface_attribute( aws_retry=True, NetworkInterfaceId=eni_id, - Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"], - 'DeleteOnTermination': delete_on_termination} + Attachment={ + "AttachmentId": eni["Attachment"]["AttachmentId"], + "DeleteOnTermination": delete_on_termination, + }, ) if delete_on_termination: waiter = "network_interface_delete_on_terminate" @@ -578,7 +564,7 @@ def modify_eni(connection, module, eni): aws_retry=True, NetworkInterfaceId=eni_id, PrivateIpAddresses=secondary_addresses_to_add, - AllowReassignment=allow_reassignment + AllowReassignment=allow_reassignment, ) wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id) changed = True @@ -590,19 +576,23 @@ def modify_eni(connection, module, eni): connection.assign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni_id, - SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count), - AllowReassignment=allow_reassignment + SecondaryPrivateIpAddressCount=( + secondary_private_ip_address_count - current_secondary_address_count + ), + AllowReassignment=allow_reassignment, ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) changed = True elif secondary_private_ip_address_count < current_secondary_address_count: # How many of these addresses do we want to remove if not module.check_mode: - secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count + secondary_addresses_to_remove_count = ( + current_secondary_address_count - secondary_private_ip_address_count + ) connection.unassign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni_id, - PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count] + PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count], ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) changed = True @@ -617,7 +607,7 @@ def modify_eni(connection, module, eni): DeviceIndex=device_index, NetworkInterfaceId=eni_id, ) - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) changed = True if "Attachment" not in eni: if not module.check_mode: @@ -627,36 +617,37 @@ def modify_eni(connection, module, eni): DeviceIndex=device_index, NetworkInterfaceId=eni_id, ) - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) changed = True elif attached is False: changed |= detach_eni(connection, eni, module) - get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_available").wait(NetworkInterfaceIds=[eni_id]) changed |= manage_tags(connection, module, eni, name, tags, purge_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to modify eni {eni_id}") eni = describe_eni(connection, module, eni_id) if module.check_mode and changed: - 
module.exit_json(changed=changed, msg="Would have modified ENI: {0} if not in check mode".format(eni['NetworkInterfaceId'])) + module.exit_json( + changed=changed, msg=f"Would have modified ENI: {eni['NetworkInterfaceId']} if not in check mode" + ) module.exit_json(changed=changed, interface=get_eni_info(eni)) def _wait_for_detach(connection, module, eni_id): try: - get_waiter(connection, 'network_interface_available').wait( + get_waiter(connection, "network_interface_available").wait( NetworkInterfaceIds=[eni_id], - WaiterConfig={'Delay': 5, 'MaxAttempts': 80}, + WaiterConfig={"Delay": 5, "MaxAttempts": 80}, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for ENI {0} to detach".format(eni_id)) + module.fail_json_aws(e, f"Timeout waiting for ENI {eni_id} to detach") def delete_eni(connection, module): - eni = uniquely_find_eni(connection, module) if not eni: module.exit_json(changed=False) @@ -683,14 +674,16 @@ def delete_eni(connection, module): changed = True module.exit_json(changed=changed) - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound"): module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, f"Failure during delete of {eni_id}") def detach_eni(connection, eni, module): - if module.check_mode: module.exit_json(changed=True, msg="Would have detached ENI if not in check mode.") @@ -717,11 +710,10 @@ def describe_eni(connection, module, eni_id): else: return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to describe eni with id: {eni_id}") def uniquely_find_eni(connection, module, eni=None): - if eni: # In the case of create, eni_id will not be a param but we can still get the eni_id after creation if "NetworkInterfaceId" in eni: @@ -731,11 +723,11 @@ def uniquely_find_eni(connection, module, eni=None): else: eni_id = module.params.get("eni_id") - private_ip_address = module.params.get('private_ip_address') - subnet_id = module.params.get('subnet_id') - instance_id = module.params.get('instance_id') - device_index = module.params.get('device_index') - attached = module.params.get('attached') + private_ip_address = module.params.get("private_ip_address") + subnet_id = module.params.get("subnet_id") + instance_id = module.params.get("instance_id") + device_index = module.params.get("device_index") + attached = module.params.get("attached") name = module.params.get("name") filters = [] @@ -745,26 +737,19 @@ def uniquely_find_eni(connection, module, eni=None): return None if eni_id: - filters.append({'Name': 'network-interface-id', - 'Values': [eni_id]}) + filters.append({"Name": "network-interface-id", "Values": [eni_id]}) if private_ip_address and subnet_id and not filters: - filters.append({'Name': 'private-ip-address', - 'Values': [private_ip_address]}) - filters.append({'Name': 'subnet-id', - 'Values': [subnet_id]}) + filters.append({"Name": "private-ip-address", "Values": [private_ip_address]}) + filters.append({"Name": "subnet-id", "Values": [subnet_id]}) if not attached and instance_id and 
device_index and not filters: - filters.append({'Name': 'attachment.instance-id', - 'Values': [instance_id]}) - filters.append({'Name': 'attachment.device-index', - 'Values': [str(device_index)]}) + filters.append({"Name": "attachment.instance-id", "Values": [instance_id]}) + filters.append({"Name": "attachment.device-index", "Values": [str(device_index)]}) if name and subnet_id and not filters: - filters.append({'Name': 'tag:Name', - 'Values': [name]}) - filters.append({'Name': 'subnet-id', - 'Values': [subnet_id]}) + filters.append({"Name": "tag:Name", "Values": [name]}) + filters.append({"Name": "subnet-id", "Values": [subnet_id]}) if not filters: return None @@ -776,13 +761,12 @@ def uniquely_find_eni(connection, module, eni=None): else: return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters)) + module.fail_json_aws(e, f"Failed to find unique eni with filters: {filters}") return None def get_sec_group_list(groups): - # Build list of remote security groups remote_security_groups = [] for group in groups: @@ -792,12 +776,11 @@ def get_sec_group_list(groups): def _get_vpc_id(connection, module, subnet_id): - try: subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) return subnets["Subnets"][0]["VpcId"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id)) + module.fail_json_aws(e, f"Failed to get vpc_id for {subnet_id}") def manage_tags(connection, module, eni, name, tags, purge_tags): @@ -807,9 +790,9 @@ def manage_tags(connection, module, eni, name, tags, purge_tags): tags = {} if name: - tags['Name'] = name + tags["Name"] = name - eni_id = eni['NetworkInterfaceId'] + eni_id = eni["NetworkInterfaceId"] changed = ensure_ec2_tags(connection, module, eni_id, tags=tags, purge_tags=purge_tags) return changed @@ -817,60 +800,60 @@ def manage_tags(connection, module, eni, name, tags, purge_tags): def main(): argument_spec = dict( - eni_id=dict(default=None, type='str'), - instance_id=dict(default=None, type='str'), - private_ip_address=dict(type='str'), - subnet_id=dict(type='str'), - description=dict(type='str'), - security_groups=dict(default=[], type='list', elements='str'), - device_index=dict(default=0, type='int'), - state=dict(default='present', choices=['present', 'absent']), - force_detach=dict(default='no', type='bool'), - source_dest_check=dict(default=None, type='bool'), - delete_on_termination=dict(default=None, type='bool'), - secondary_private_ip_addresses=dict(default=None, type='list', elements='str'), - purge_secondary_private_ip_addresses=dict(default=False, type='bool'), - secondary_private_ip_address_count=dict(default=None, type='int'), - allow_reassignment=dict(default=False, type='bool'), - attached=dict(default=None, type='bool'), - name=dict(default=None, type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + eni_id=dict(default=None, type="str"), + instance_id=dict(default=None, type="str"), + private_ip_address=dict(type="str"), + subnet_id=dict(type="str"), + description=dict(type="str"), + security_groups=dict(default=[], type="list", elements="str"), + device_index=dict(default=0, type="int"), + state=dict(default="present", choices=["present", "absent"]), + force_detach=dict(default="no", type="bool"), + source_dest_check=dict(default=None, type="bool"), 
+ delete_on_termination=dict(default=None, type="bool"), + secondary_private_ip_addresses=dict(default=None, type="list", elements="str"), + purge_secondary_private_ip_addresses=dict(default=False, type="bool"), + secondary_private_ip_address_count=dict(default=None, type="int"), + allow_reassignment=dict(default=False, type="bool"), + attached=dict(default=None, type="bool"), + name=dict(default=None, type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['secondary_private_ip_addresses', 'secondary_private_ip_address_count'] - ], - required_if=([ - ('attached', True, ['instance_id']), - ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses']) - ]), + mutually_exclusive=[["secondary_private_ip_addresses", "secondary_private_ip_address_count"]], + required_if=( + [ + ("attached", True, ["instance_id"]), + ("purge_secondary_private_ip_addresses", True, ["secondary_private_ip_addresses"]), + ] + ), supports_check_mode=True, ) retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['IncorrectState'], + catch_extra_error_codes=["IncorrectState"], ) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) state = module.params.get("state") - if state == 'present': + if state == "present": eni = uniquely_find_eni(connection, module) if eni is None: subnet_id = module.params.get("subnet_id") if subnet_id is None: - module.fail_json(msg='subnet_id is required when creating a new ENI') + module.fail_json(msg="subnet_id is required when creating a new ENI") vpc_id = _get_vpc_id(connection, module, subnet_id) create_eni(connection, vpc_id, module) else: modify_eni(connection, module, eni) - elif state == 'absent': + elif state == "absent": delete_eni(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py index 6eb24c22f..5ef36b258 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eni_info version_added: 1.0.0 @@ -28,13 +26,14 @@ options: See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. - This option is mutually exclusive of I(eni_id). type: dict + default: {} extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
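# Gather information about all ENIs attached to a particular instance
# (illustrative sketch; the instance ID below is a placeholder)
- amazon.aws.ec2_eni_info:
    filters:
      attachment.instance-id: i-0123456789abcdef0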
# Gather information about all ENIs @@ -44,10 +43,9 @@ EXAMPLES = ''' - amazon.aws.ec2_eni_info: filters: network-interface-id: eni-xxxxxxx +""" -''' - -RETURN = ''' +RETURN = r""" network_interfaces: description: List of matching elastic network interfaces. returned: always @@ -188,7 +186,7 @@ network_interfaces: returned: always type: str sample: "vpc-b3f1f123" -''' +""" try: from botocore.exceptions import ClientError @@ -198,90 +196,59 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +def build_request_args(eni_id, filters): + request_args = { + "NetworkInterfaceIds": [eni_id] if eni_id else [], + "Filters": ansible_dict_to_boto3_filter_list(filters), + } -def list_eni(connection, module): + request_args = {k: v for k, v in request_args.items() if v} - params = {} - # Options are mutually exclusive - if module.params.get("eni_id"): - params['NetworkInterfaceIds'] = [module.params.get("eni_id")] - elif module.params.get("filters"): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - else: - params['Filters'] = [] + return request_args + +def get_network_interfaces(connection, module, request_args): try: - network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces'] - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **request_args) + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound"): module.exit_json(network_interfaces=[]) except (ClientError, NoCredentialsError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - # Modify boto3 tags list to be ansible friendly dict and then camel_case - camel_network_interfaces = [] - for network_interface in network_interfaces_result: - network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet']) - network_interface['Tags'] = network_interface['TagSet'] - if 'Name' in network_interface['Tags']: - network_interface['Name'] = network_interface['Tags']['Name'] - # Added id to interface info to be compatible with return values of ec2_eni module: - network_interface['Id'] = network_interface['NetworkInterfaceId'] - camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet'])) - - module.exit_json(network_interfaces=camel_network_interfaces) + return network_interfaces_result -def get_eni_info(interface): +def list_eni(connection, module, request_args): + network_interfaces_result = 
get_network_interfaces(connection, module, request_args) - # Private addresses - private_addresses = [] - for ip in interface.private_ip_addresses: - private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}) - - interface_info = {'id': interface.id, - 'subnet_id': interface.subnet_id, - 'vpc_id': interface.vpc_id, - 'description': interface.description, - 'owner_id': interface.owner_id, - 'status': interface.status, - 'mac_address': interface.mac_address, - 'private_ip_address': interface.private_ip_address, - 'source_dest_check': interface.source_dest_check, - 'groups': dict((group.id, group.name) for group in interface.groups), - 'private_ip_addresses': private_addresses - } - - if hasattr(interface, 'publicDnsName'): - interface_info['association'] = {'public_ip_address': interface.publicIp, - 'public_dns_name': interface.publicDnsName, - 'ip_owner_id': interface.ipOwnerId - } - - if interface.attachment is not None: - interface_info['attachment'] = {'attachment_id': interface.attachment.id, - 'instance_id': interface.attachment.instance_id, - 'device_index': interface.attachment.device_index, - 'status': interface.attachment.status, - 'attach_time': interface.attachment.attach_time, - 'delete_on_termination': interface.attachment.delete_on_termination, - } + # Modify boto3 tags list to be ansible friendly dict and then camel_case + camel_network_interfaces = [] + for network_interface in network_interfaces_result["NetworkInterfaces"]: + network_interface["TagSet"] = boto3_tag_list_to_ansible_dict(network_interface["TagSet"]) + network_interface["Tags"] = network_interface["TagSet"] + if "Name" in network_interface["Tags"]: + network_interface["Name"] = network_interface["Tags"]["Name"] + # Added id to interface info to be compatible with return values of ec2_eni module: + network_interface["Id"] = network_interface["NetworkInterfaceId"] + camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=["Tags", "TagSet"])) - return interface_info + return camel_network_interfaces def main(): argument_spec = dict( - eni_id=dict(type='str'), - filters=dict(default=None, type='dict') + eni_id=dict(type="str"), + filters=dict(default={}, type="dict"), ) mutually_exclusive = [ - ['eni_id', 'filters'] + ["eni_id", "filters"], ] module = AnsibleAWSModule( @@ -290,10 +257,17 @@ def main(): mutually_exclusive=mutually_exclusive, ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + eni_id=module.params["eni_id"], + filters=module.params["filters"], + ) + + result = list_eni(connection, module, request_args) - list_eni(connection, module) + module.exit_json(network_interfaces=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py new file mode 100644 index 000000000..c167d5ce8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py @@ -0,0 +1,512 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: ec2_import_image +version_added: 7.0.0 +short_description: Manage AWS EC2 import image tasks +description: + - Import single or 
multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). + - Cancel an in-process import virtual machine task. +options: + state: + description: + - Use I(state=present) to import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). + - Use I(state=absent) to cancel an in-process import virtual machine task. + default: "present" + choices: ["present", "absent"] + type: str + task_name: + description: + - The name of the EC2 image import task. + type: str + aliases: ["name"] + required: true + architecture: + description: + - The architecture of the virtual machine. + type: str + choices: ["i386", "x86_64"] + client_data: + description: + - The client-specific data. + type: dict + suboptions: + comment: + description: + - A user-defined comment about the disk upload. + type: str + upload_end: + description: + - The time that the disk upload ends. + type: str + upload_size: + description: + - The size of the uploaded disk image, in GiB. + type: float + upload_start: + description: + - The time that the disk upload starts. + type: str + description: + description: + - A description string for the import image task. + type: str + disk_containers: + description: + - Information about the disk containers. + type: list + elements: dict + suboptions: + description: + description: + - The description of the disk image. + type: str + device_name: + description: + - The block device mapping for the disk. + type: str + format: + description: + - The format of the disk image being imported. + type: str + choices: ["OVA", "ova", "VHD", "vhd", "VHDX", "vhdx", "VMDK", "vmdk", "RAW", "raw"] + snapshot_id: + description: + - The ID of the EBS snapshot to be used for importing the snapshot. + type: str + url: + description: + - The URL to the Amazon S3-based disk image being imported. + The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..). + type: str + user_bucket: + description: + - The S3 bucket for the disk image. + type: dict + suboptions: + s3_bucket: + description: + - The name of the Amazon S3 bucket where the disk image is located. + type: str + s3_key: + description: + - The file name of the disk image. + type: str + encrypted: + description: + - Specifies whether the destination AMI of the imported image should be encrypted. + - The default KMS key for EBS is used unless you specify a non-default KMS key using I(kms_key_id). + type: bool + hypervisor: + description: + - The target hypervisor platform. + type: str + choices: ["xen"] + kms_key_id: + description: + - An identifier for the symmetric KMS key to use when creating the encrypted AMI. + This parameter is only required if you want to use a non-default KMS key; + if this parameter is not specified, the default KMS key for EBS is used. + If a I(kms_key_id) is specified, the I(encrypted) flag must also be set. + type: str + license_type: + description: + - The license type to be used for the Amazon Machine Image (AMI) after importing. + type: str + platform: + description: + - The operating system of the virtual machine. + type: str + choices: ["Windows", "Linux"] + role_name: + description: + - The name of the role to use when not using the default role, 'vmimport'. + type: str + license_specifications: + description: + - The ARNs of the license configurations. + type: list + elements: dict + suboptions: + license_configuration_arn: + description: + - The ARN of a license configuration. 
+ type: str + boot_mode: + description: + - The boot mode of the virtual machine. + type: str + choices: ["legacy-bios", "uefi"] + cancel_reason: + description: + - The reason for canceling the task. + type: str + usage_operation: + description: + - The usage operation value. + type: str + tags: + description: + - The tags to apply to the import image task during creation. + type: dict + aliases: ["resource_tags"] +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Import image + amazon.aws.ec2_import_image: + state: present + task_name: "clone-vm-import-image" + disk_containers: + - format: raw + user_bucket: + s3_bucket: "clone-vm-s3-bucket" + s3_key: "clone-vm-s3-bucket/ubuntu-vm-clone.raw" + +- name: Cancel an import image task + amazon.aws.ec2_import_image: + state: absent + task_name: "clone-vm-import-image" +""" + +RETURN = r""" +import_image: + description: A dict containing information about an EC2 import task. + returned: always + type: complex + contains: + task_name: + description: + - The name of the EC2 image import task. + type: str + architecture: + description: + - The architecture of the virtual machine. + type: str + image_id: + description: + - The ID of the Amazon Machine Image (AMI) created by the import task. + type: str + import_task_id: + description: + - The task ID of the import image task. + type: str + progress: + description: + - The progress of the task. + type: str + snapshot_details: + description: + - Describes the snapshot created from the imported disk. + type: dict + contains: + description: + description: + - A description for the snapshot. + type: str + device_name: + description: + - The block device mapping for the snapshot. + type: str + disk_image_size: + description: + - The size of the disk in the snapshot, in GiB. + type: float + format: + description: + - The format of the disk image from which the snapshot is created. + type: str + progress: + description: + - The percentage of progress for the task. + type: str + snapshot_id: + description: + - The snapshot ID of the disk being imported. + type: str + status: + description: + - A brief status of the snapshot creation. + type: str + status_message: + description: + - A detailed status message for the snapshot creation. + type: str + url: + description: + - The URL used to access the disk image. + type: str + user_bucket: + description: + - The Amazon S3 bucket for the disk image. + type: dict + status: + description: + - A brief status of the task. + type: str + status_message: + description: + - A detailed status message of the import task. + type: str + license_specifications: + description: + - The ARNs of the license configurations. + type: dict + usage_operation: + description: + - The usage operation value. + type: dict + description: + description: + - A description string for the import image task. + type: str + encrypted: + description: + - Specifies whether the destination AMI of the imported image should be encrypted. + type: bool + hypervisor: + description: + - The target hypervisor platform. + type: str + kms_key_id: + description: + - The identifier for the symmetric KMS key that was used to create the encrypted AMI. + type: str + license_type: + description: + - The license type to be used for the Amazon Machine Image (AMI) after importing. 
+      type: str
+    platform:
+      description:
+        - The operating system of the virtual machine.
+      type: str
+    role_name:
+      description:
+        - The name of the role to use when not using the default role, 'vmimport'.
+      type: str
+    tags:
+      description:
+        - The tags to apply to the import image task during creation.
+      type: dict
+"""
+
+import copy
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import helper_describe_import_image_tasks
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
+
+
+def ensure_ec2_import_image_result(import_image_info):
+    result = {"import_image": {}}
+    if import_image_info:
+        image = copy.deepcopy(import_image_info[0])
+        image["Tags"] = boto3_tag_list_to_ansible_dict(image["Tags"])
+        result["import_image"] = camel_dict_to_snake_dict(image, ignore_list=["Tags"])
+    return result
+
+
+def absent(client, module):
+    """
+    Cancel an in-process import virtual machine task
+    """
+
+    filters = {
+        "Filters": [
+            {"Name": "tag:Name", "Values": [module.params["task_name"]]},
+            {"Name": "task-state", "Values": ["active"]},
+        ]
+    }
+
+    params = {}
+
+    if module.params.get("cancel_reason"):
+        params["CancelReason"] = module.params["cancel_reason"]
+
+    import_image_info = helper_describe_import_image_tasks(client, module, **filters)
+
+    if import_image_info:
+        params["ImportTaskId"] = import_image_info[0]["ImportTaskId"]
+        import_image_info[0]["TaskName"] = module.params["task_name"]
+
+        if module.check_mode:
+            module.exit_json(changed=True, msg="Would have cancelled the import task if not in check mode")
+
+        try:
+            client.cancel_import_task(aws_retry=True, **params)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to cancel the import image task")
+    else:
+        module.exit_json(
+            changed=False,
+            msg="The specified import task does not exist or it cannot be cancelled",
+            **{"import_image": {}},
+        )
+
+    module.exit_json(changed=True, **ensure_ec2_import_image_result(import_image_info))
+
+
+def present(client, module):
+    params = {}
+    tags = module.params.get("tags") or {}
+    tags.update({"Name": module.params["task_name"]})
+
+    if module.params.get("architecture"):
+        params["Architecture"] = module.params["architecture"]
+    if module.params.get("client_data"):
+        params["ClientData"] = snake_dict_to_camel_dict(module.params["client_data"], capitalize_first=True)
+    if module.params.get("description"):
+        params["Description"] = module.params["description"]
+    if module.params.get("disk_containers"):
+        params["DiskContainers"] = snake_dict_to_camel_dict(module.params["disk_containers"], capitalize_first=True)
+    if module.params.get("encrypted"):
+        params["Encrypted"] = module.params["encrypted"]
+    if module.params.get("hypervisor"):
+        params["Hypervisor"] = module.params["hypervisor"]
+    if module.params.get("kms_key_id"):
+        params["KmsKeyId"] =
module.params["kms_key_id"] + if module.params.get("license_type"): + params["LicenseType"] = module.params["license_type"] + if module.params.get("platform"): + params["Platform"] = module.params["platform"] + if module.params.get("role_name"): + params["RoleName"] = module.params["role_name"] + if module.params.get("license_specifications"): + params["LicenseSpecifications"] = snake_dict_to_camel_dict( + module.params["license_specifications"], capitalize_first=True + ) + if module.params.get("usage_operation"): + params["UsageOperation"] = module.params["usage_operation"] + if module.params.get("boot_mode"): + params["BootMode"] = module.params.get("boot_mode") + params["TagSpecifications"] = boto3_tag_specifications(tags, ["import-image-task"]) + + filters = { + "Filters": [ + {"Name": "tag:Name", "Values": [module.params["task_name"]]}, + {"Name": "task-state", "Values": ["completed", "active", "deleting"]}, + ] + } + import_image_info = helper_describe_import_image_tasks(client, module, **filters) + + if import_image_info: + import_image_info[0]["TaskName"] = module.params["task_name"] + module.exit_json( + changed=False, + msg="An import task with the specified name already exists", + **ensure_ec2_import_image_result(import_image_info), + ) + else: + if module.check_mode: + module.exit_json(changed=True, msg="Would have created the import task if not in check mode") + + params = scrub_none_parameters(params) + + try: + client.import_image(aws_retry=True, **params) + import_image_info = helper_describe_import_image_tasks(client, module, **filters) + import_image_info[0]["TaskName"] = module.params["task_name"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to import the image") + + module.exit_json(changed=True, **ensure_ec2_import_image_result(import_image_info)) + + +def main(): + argument_spec = dict( + architecture=dict(type="str", choices=["i386", "x86_64"]), + client_data=dict( + type="dict", + options=dict( + comment=dict(type="str"), + upload_end=dict(type="str"), + upload_size=dict(type="float"), + upload_start=dict(type="str"), + ), + ), + description=dict(type="str"), + license_specifications=dict( + type="list", + elements="dict", + options=dict( + license_configuration_arn=dict(type="str"), + ), + ), + encrypted=dict(type="bool"), + state=dict(default="present", choices=["present", "absent"]), + hypervisor=dict(type="str", choices=["xen"]), + kms_key_id=dict(type="str"), + license_type=dict(type="str", no_log=False), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + platform=dict(type="str", choices=["Windows", "Linux"]), + role_name=dict(type="str"), + disk_containers=dict( + type="list", + elements="dict", + options=dict( + description=dict(type="str"), + device_name=dict(type="str"), + format=dict( + type="str", choices=["OVA", "ova", "VHD", "vhd", "VHDX", "vhdx", "VMDK", "vmdk", "RAW", "raw"] + ), + snapshot_id=dict(type="str"), + url=dict(type="str"), + user_bucket=dict( + type="dict", + options=dict( + s3_bucket=dict(type="str"), + s3_key=dict(type="str", no_log=True), + ), + ), + ), + ), + usage_operation=dict(type="str"), + boot_mode=dict(type="str", choices=["legacy-bios", "uefi"]), + cancel_reason=dict(type="str"), + task_name=dict(type="str", aliases=["name"], required=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + state = module.params.get("state") + + try: + client = module.client("ec2", 
retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+    if state == "present":
+        present(client, module)
+    else:
+        absent(client, module)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py
new file mode 100644
index 000000000..aa7fa2db1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: ec2_import_image_info
+version_added: 7.0.0
+short_description: Gather information about import virtual machine tasks
+description:
+  - Displays details about import virtual machine tasks that have already been created.
+author:
+  - Alina Buzachis (@alinabuzachis)
+options:
+  import_task_ids:
+    description: The IDs of the import image tasks.
+    type: list
+    elements: str
+    aliases: ["ids"]
+  filters:
+    description:
+      - A list of filters to apply. Each filter dict consists of a filter key and a filter value.
+      - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImportImageTasks.html) for possible filters.
+    type: list
+    elements: dict
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Check status of import image
+  amazon.aws.ec2_import_image_info:
+    filters:
+      - Name: "tag:Name"
+        Values: ["clone-vm-import-image"]
+      - Name: "task-state"
+        Values: ["completed", "active"]
+"""
+
+RETURN = r"""
+import_image:
+  description: A list of EC2 import tasks.
+  returned: always
+  type: complex
+  contains:
+    task_name:
+      description:
+        - The name of the EC2 image import task.
+      type: str
+    architecture:
+      description:
+        - The architecture of the virtual machine.
+      type: str
+    image_id:
+      description:
+        - The ID of the Amazon Machine Image (AMI) created by the import task.
+      type: str
+    import_task_id:
+      description:
+        - The task ID of the import image task.
+      type: str
+    progress:
+      description:
+        - The progress of the task.
+      type: str
+    snapshot_details:
+      description:
+        - Describes the snapshot created from the imported disk.
+      type: dict
+      contains:
+        description:
+          description:
+            - A description for the snapshot.
+          type: str
+        device_name:
+          description:
+            - The block device mapping for the snapshot.
+          type: str
+        disk_image_size:
+          description:
+            - The size of the disk in the snapshot, in GiB.
+          type: float
+        format:
+          description:
+            - The format of the disk image from which the snapshot is created.
+          type: str
+        progress:
+          description:
+            - The percentage of progress for the task.
+          type: str
+        snapshot_id:
+          description:
+            - The snapshot ID of the disk being imported.
+          type: str
+        status:
+          description:
+            - A brief status of the snapshot creation.
+          type: str
+        status_message:
+          description:
+            - A detailed status message for the snapshot creation.
+          type: str
+        url:
+          description:
+            - The URL used to access the disk image.
+          type: str
+        user_bucket:
+          description:
+            - The Amazon S3 bucket for the disk image.
+          type: dict
+    status:
+      description:
+        - A brief status of the task.
+      type: str
+    status_message:
+      description:
+        - A detailed status message of the import task.
+      type: str
+    license_specifications:
+      description:
+        - The ARNs of the license configurations.
+      type: dict
+    usage_operation:
+      description:
+        - The usage operation value.
+      type: dict
+    description:
+      description:
+        - A description string for the import image task.
+      type: str
+    encrypted:
+      description:
+        - Specifies whether the destination AMI of the imported image should be encrypted.
+      type: bool
+    hypervisor:
+      description:
+        - The target hypervisor platform.
+      type: str
+    kms_key_id:
+      description:
+        - The identifier for the symmetric KMS key that was used to create the encrypted AMI.
+      type: str
+    license_type:
+      description:
+        - The license type to be used for the Amazon Machine Image (AMI) after importing.
+      type: str
+    platform:
+      description:
+        - The operating system of the virtual machine.
+      type: str
+    role_name:
+      description:
+        - The name of the role to use when not using the default role, 'vmimport'.
+      type: str
+    tags:
+      description:
+        - The tags to apply to the import image task during creation.
+      type: dict
+"""
+
+import copy
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import helper_describe_import_image_tasks
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def ensure_ec2_import_image_result(import_image_info):
+    result = {"import_image": []}
+    if import_image_info:
+        for image in import_image_info:
+            image = copy.deepcopy(image)
+            image["Tags"] = boto3_tag_list_to_ansible_dict(image["Tags"])
+            result["import_image"].append(camel_dict_to_snake_dict(image, ignore_list=["Tags"]))
+    return result
+
+
+def main():
+    argument_spec = dict(
+        import_task_ids=dict(type="list", elements="str", aliases=["ids"]),
+        filters=dict(type="list", elements="dict"),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+    client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
+    params = {}
+
+    if module.params.get("filters"):
+        params["Filters"] = module.params["filters"]
+    if module.params.get("import_task_ids"):
+        params["ImportTaskIds"] = module.params["import_task_ids"]
+
+    import_image_info = helper_describe_import_image_tasks(client, module, **params)
+
+    module.exit_json(**ensure_ec2_import_image_result(import_image_info))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
index 1cf5a5ddb..06089e4fe 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
@@ -1,11 +1,9 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
 DOCUMENTATION = r"""
 ---
 module: ec2_instance
@@ -52,11 +50,11 @@ options:
     type: int
   instance_type:
     description:
-      - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+      - Instance type to use for the instance, see
+        U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
       - Only required when instance is not already present.
-      - If not specified, C(t2.micro) will be used.
-      - In a release after 2023-01-01 the default will be removed and either I(instance_type) or
-        I(launch_template) must be specificed when launching an instance.
+      - At least one of I(instance_type) or I(launch_template) must be specified when launching an
+        instance.
     type: str
   count:
     description:
@@ -227,6 +225,8 @@ options:
   launch_template:
     description:
       - The EC2 launch template to base instance configuration on.
+      - At least one of I(instance_type) or I(launch_template) must be specified when launching an
+        instance.
     type: dict
     suboptions:
       id:
@@ -258,6 +258,7 @@ options:
   tenancy:
     description:
       - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+      - This field is deprecated and will be removed in a release after 2025-12-01, use I(placement) instead.
     choices: ['dedicated', 'default']
     type: str
   termination_protection:
@@ -325,7 +326,58 @@ options:
   placement_group:
     description:
       - The placement group that needs to be assigned to the instance.
+      - This field is deprecated and will be removed in a release after 2025-12-01, use I(placement) instead.
     type: str
+  placement:
+    description:
+      - The location where the instance launched, if applicable.
+    type: dict
+    version_added: 7.0.0
+    suboptions:
+      affinity:
+        description: The affinity setting for the instance on the Dedicated Host.
+        type: str
+        required: false
+      availability_zone:
+        description: The Availability Zone of the instance.
+        type: str
+        required: false
+      group_name:
+        description: The name of the placement group the instance is in.
+        type: str
+        required: false
+      host_id:
+        description: The ID of the Dedicated Host on which the instance resides.
+        type: str
+        required: false
+      host_resource_group_arn:
+        description: The ARN of the host resource group in which to launch the instances.
+        type: str
+        required: false
+      partition_number:
+        description: The number of the partition the instance is in.
+        type: int
+        required: false
+      tenancy:
+        description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+        type: str
+        required: false
+        choices: ['dedicated', 'default']
+  license_specifications:
+    description:
+      - The license specifications to be used for the instance.
+    type: list
+    elements: dict
+    suboptions:
+      license_configuration_arn:
+        description: The Amazon Resource Name (ARN) of the license configuration.
+        type: str
+        required: true
+  additional_info:
+    description:
+      - Reserved for Amazon's internal use.
+    type: str
+    version_added: 7.1.0
   metadata_options:
     description:
       - Modify the metadata options for the instance.
@@ -360,22 +412,20 @@ options:
       version_added: 4.0.0
       type: str
       description:
-        - Wether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)).
-        - Requires botocore >= 1.21.29
+        - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)).
       choices: [enabled, disabled]
       default: 'disabled'
     instance_metadata_tags:
       version_added: 4.0.0
       type: str
       description:
-        - Wether the instance tags are availble (C(enabled)) via metadata endpoint or not (C(disabled)).
-        - Requires botocore >= 1.23.30
+        - Whether the instance tags are available (C(enabled)) via metadata endpoint or not (C(disabled)).
       choices: [enabled, disabled]
       default: 'disabled'

 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
 """
@@ -440,12 +490,12 @@ EXAMPLES = r"""
       Environment: Testing
     instance_type: c4.large
     volumes:
-    - device_name: /dev/sda1
-      ebs:
-        delete_on_termination: true
+      - device_name: /dev/sda1
+        ebs:
+          delete_on_termination: true
     cpu_options:
-        core_count: 1
-        threads_per_core: 1
+      core_count: 1
+      threads_per_core: 1

 - name: start an instance and have it begin a Tower callback on boot
   amazon.aws.ec2_instance:
@@ -476,9 +526,9 @@
     tags:
       Env: "eni_on"
     volumes:
-    - device_name: /dev/sda1
-      ebs:
-        delete_on_termination: true
+      - device_name: /dev/sda1
+        ebs:
+          delete_on_termination: true
     instance_type: t2.micro
     image_id: ami-123456
@@ -534,6 +584,22 @@
     state: present
     tags:
       foo: bar
+
+# launches a mac instance with HostResourceGroupArn and LicenseSpecifications
+- name: start a mac instance with a host resource group and license specifications
+  amazon.aws.ec2_instance:
+    name: "mac-compute-instance"
+    key_name: "prod-ssh-key"
+    vpc_subnet_id: subnet-5ca1ab1e
+    instance_type: mac1.metal
+    security_group: default
+    placement:
+      host_resource_group_arn: arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup
+    license_specifications:
+      - license_configuration_arn: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789
+    image_id: ami-123456
+    tags:
+      Environment: Testing
 """

 RETURN = r"""
@@ -660,6 +726,17 @@ instances:
       returned: always
       type: str
       sample: "2017-03-23T22:51:24+00:00"
+    licenses:
+      description: The license configurations for the instance.
+      returned: When license specifications are provided.
+      type: list
+      elements: dict
+      contains:
+        license_configuration_arn:
+          description: The Amazon Resource Name (ARN) of the license configuration.
+          returned: always
+          type: str
+          sample: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789
     monitoring:
       description: The monitoring for the instance.
       returned: always
@@ -843,16 +920,45 @@ instances:
           returned: always
           type: str
           sample: ap-southeast-2a
+        affinity:
+          description: The affinity setting for the instance on the Dedicated Host.
+          returned: When a placement group is specified.
+          type: str
+        group_id:
+          description: The ID of the placement group the instance is in (for cluster compute instances).
+          returned: always
+          type: str
+          sample: "pg-01234566"
         group_name:
           description: The name of the placement group the instance is in (for cluster compute instances).
           returned: always
           type: str
-          sample: ""
+          sample: "my-placement-group"
+        host_id:
+          description: The ID of the Dedicated Host on which the instance resides.
+          returned: always
+          type: str
+        host_resource_group_arn:
+          description: The ARN of the host resource group in which the instance resides.
+          returned: always
+          type: str
+          sample: "arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup"
+        partition_number:
+          description: The number of the partition the instance is in.
+          returned: always
+          type: int
+          sample: 1
         tenancy:
-          description: The tenancy of the instance (if the instance is running in a VPC).
+          description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
returned: always type: str sample: default + additional_info: + description: Reserved for Amazon's internal use. + returned: always + type: str + version_added: 7.1.0 + sample: private_dns_name: description: The private DNS name. returned: always @@ -962,9 +1068,9 @@ instances: sample: vpc-0011223344 """ -from collections import namedtuple import time import uuid +from collections import namedtuple try: import botocore @@ -977,63 +1083,68 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import parse_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.tower import tower_callback_script +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list module = None +class Ec2InstanceAWSError(AnsibleAWSError): + pass + + def build_volume_spec(params): - volumes = params.get('volumes') or [] + volumes = params.get("volumes") or [] for volume in volumes: - if 'ebs' in volume: - for int_value in ['volume_size', 'iops']: - if int_value in volume['ebs']: - volume['ebs'][int_value] = int(volume['ebs'][int_value]) - if 'volume_type' in volume['ebs'] and volume['ebs']['volume_type'] == 'gp3': - if not volume['ebs'].get('iops'): - volume['ebs']['iops'] = 3000 - if 'throughput' in volume['ebs']: - volume['ebs']['throughput'] = int(volume['ebs']['throughput']) + if "ebs" in volume: + for int_value in ["volume_size", "iops"]: + if int_value in volume["ebs"]: + volume["ebs"][int_value] = int(volume["ebs"][int_value]) + if "volume_type" in volume["ebs"] and volume["ebs"]["volume_type"] == "gp3": + if not volume["ebs"].get("iops"): + volume["ebs"]["iops"] = 3000 + if "throughput" in volume["ebs"]: + volume["ebs"]["throughput"] = int(volume["ebs"]["throughput"]) else: - volume['ebs']['throughput'] = 125 + volume["ebs"]["throughput"] = 125 return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] def add_or_update_instance_profile(instance, 
desired_profile_name): - instance_profile_setting = instance.get('IamInstanceProfile') + instance_profile_setting = instance.get("IamInstanceProfile") if instance_profile_setting and desired_profile_name: - if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')): + if desired_profile_name in (instance_profile_setting.get("Name"), instance_profile_setting.get("Arn")): # great, the profile we asked for is what's there return False else: desired_arn = determine_iam_role(desired_profile_name) - if instance_profile_setting.get('Arn') == desired_arn: + if instance_profile_setting.get("Arn") == desired_arn: return False # update association try: association = client.describe_iam_instance_profile_associations( - aws_retry=True, - Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) + aws_retry=True, Filters=[{"Name": "instance-id", "Values": [instance["InstanceId"]]}] + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # check for InvalidAssociationID.NotFound module.fail_json_aws(e, "Could not find instance profile association") try: client.replace_iam_instance_profile_association( aws_retry=True, - AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'], - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)} + AssociationId=association["IamInstanceProfileAssociations"][0]["AssociationId"], + IamInstanceProfile={"Arn": determine_iam_role(desired_profile_name)}, ) return True except botocore.exceptions.ClientError as e: @@ -1044,8 +1155,8 @@ def add_or_update_instance_profile(instance, desired_profile_name): try: client.associate_iam_instance_profile( aws_retry=True, - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}, - InstanceId=instance['InstanceId'] + IamInstanceProfile={"Arn": determine_iam_role(desired_profile_name)}, + InstanceId=instance["InstanceId"], ) return True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -1085,76 +1196,80 @@ def build_network_spec(params): """ interfaces = [] - network = params.get('network') or {} - if not network.get('interfaces'): + network = params.get("network") or {} + if not network.get("interfaces"): # they only specified one interface spec = { - 'DeviceIndex': 0, + "DeviceIndex": 0, } - if network.get('assign_public_ip') is not None: - spec['AssociatePublicIpAddress'] = network['assign_public_ip'] + if network.get("assign_public_ip") is not None: + spec["AssociatePublicIpAddress"] = network["assign_public_ip"] - if params.get('vpc_subnet_id'): - spec['SubnetId'] = params['vpc_subnet_id'] + if params.get("vpc_subnet_id"): + spec["SubnetId"] = params["vpc_subnet_id"] else: default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance") + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to create an instance" + ) + ) else: - sub = get_default_subnet(default_vpc, availability_zone=module.params.get('availability_zone')) - spec['SubnetId'] = sub['SubnetId'] + sub = get_default_subnet(default_vpc, availability_zone=module.params.get("availability_zone")) + spec["SubnetId"] = sub["SubnetId"] - if network.get('private_ip_address'): - spec['PrivateIpAddress'] = network['private_ip_address'] + if network.get("private_ip_address"): + spec["PrivateIpAddress"] = 
network["private_ip_address"] - if params.get('security_group') or params.get('security_groups'): + if params.get("security_group") or params.get("security_groups"): groups = discover_security_groups( - group=params.get('security_group'), - groups=params.get('security_groups'), - subnet_id=spec['SubnetId'], + group=params.get("security_group"), + groups=params.get("security_groups"), + subnet_id=spec["SubnetId"], ) - spec['Groups'] = groups - if network.get('description') is not None: - spec['Description'] = network['description'] + spec["Groups"] = groups + if network.get("description") is not None: + spec["Description"] = network["description"] # TODO more special snowflake network things return [spec] # handle list of `network.interfaces` options - for idx, interface_params in enumerate(network.get('interfaces', [])): + for idx, interface_params in enumerate(network.get("interfaces", [])): spec = { - 'DeviceIndex': idx, + "DeviceIndex": idx, } if isinstance(interface_params, string_types): # naive case where user gave # network_interfaces: [eni-1234, eni-4567, ....] # put into normal data structure so we don't dupe code - interface_params = {'id': interface_params} + interface_params = {"id": interface_params} - if interface_params.get('id') is not None: + if interface_params.get("id") is not None: # if an ID is provided, we don't want to set any other parameters. - spec['NetworkInterfaceId'] = interface_params['id'] + spec["NetworkInterfaceId"] = interface_params["id"] interfaces.append(spec) continue - spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True) + spec["DeleteOnTermination"] = interface_params.get("delete_on_termination", True) - if interface_params.get('ipv6_addresses'): - spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])] + if interface_params.get("ipv6_addresses"): + spec["Ipv6Addresses"] = [{"Ipv6Address": a} for a in interface_params.get("ipv6_addresses", [])] - if interface_params.get('private_ip_address'): - spec['PrivateIpAddress'] = interface_params.get('private_ip_address') + if interface_params.get("private_ip_address"): + spec["PrivateIpAddress"] = interface_params.get("private_ip_address") - if interface_params.get('description'): - spec['Description'] = interface_params.get('description') + if interface_params.get("description"): + spec["Description"] = interface_params.get("description") - if interface_params.get('subnet_id', params.get('vpc_subnet_id')): - spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id')) - elif not spec.get('SubnetId') and not interface_params['id']: + if interface_params.get("subnet_id", params.get("vpc_subnet_id")): + spec["SubnetId"] = interface_params.get("subnet_id", params.get("vpc_subnet_id")) + elif not spec.get("SubnetId") and not interface_params["id"]: # TODO grab a subnet from default VPC - raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params)) + raise ValueError(f"Failed to assign subnet to interface {interface_params}") interfaces.append(spec) return interfaces @@ -1162,57 +1277,58 @@ def build_network_spec(params): def warn_if_public_ip_assignment_changed(instance): # This is a non-modifiable attribute. 
-    assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+    assign_public_ip = (module.params.get("network") or {}).get("assign_public_ip")
     if assign_public_ip is None:
         return

     # Check that public ip assignment is the same and warn if not
-    public_dns_name = instance.get('PublicDnsName')
+    public_dns_name = instance.get("PublicDnsName")
     if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
         module.warn(
-            "Unable to modify public ip assignment to {0} for instance {1}. "
-            "Whether or not to assign a public IP is determined during instance creation.".format(
-                assign_public_ip, instance['InstanceId']))
+            f"Unable to modify public ip assignment to {assign_public_ip} for instance {instance['InstanceId']}."
+            " Whether or not to assign a public IP is determined during instance creation."
+        )


 def warn_if_cpu_options_changed(instance):
     # This is a non-modifiable attribute.
-    cpu_options = module.params.get('cpu_options')
+    cpu_options = module.params.get("cpu_options")
     if cpu_options is None:
         return

     # Check that the CpuOptions set are the same and warn if not
-    core_count_curr = instance['CpuOptions'].get('CoreCount')
-    core_count = cpu_options.get('core_count')
-    threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
-    threads_per_core = cpu_options.get('threads_per_core')
+    core_count_curr = instance["CpuOptions"].get("CoreCount")
+    core_count = cpu_options.get("core_count")
+    threads_per_core_curr = instance["CpuOptions"].get("ThreadsPerCore")
+    threads_per_core = cpu_options.get("threads_per_core")
     if core_count_curr != core_count:
         module.warn(
-            "Unable to modify core_count from {0} to {1}. "
-            "Assigning a number of core is determinted during instance creation".format(
-                core_count_curr, core_count))
+            f"Unable to modify core_count from {core_count_curr} to {core_count}. Assigning a number of cores is"
+            " determined during instance creation."
+        )
     if threads_per_core_curr != threads_per_core:
         module.warn(
-            "Unable to modify threads_per_core from {0} to {1}. "
-            "Assigning a number of threads per core is determined during instance creation.".format(
-                threads_per_core_curr, threads_per_core))
+            f"Unable to modify threads_per_core from {threads_per_core_curr} to {threads_per_core}. Assigning a number"
+            " of threads per core is determined during instance creation."
+        )


 def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None):
-
     if subnet_id is not None:
         try:
             sub = client.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
-        except is_boto3_error_code('InvalidGroup.NotFound'):
+        except is_boto3_error_code("InvalidGroup.NotFound"):
             module.fail_json(
-                "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
-                    subnet_id
-                )
+                f"Could not find subnet {subnet_id} to associate security groups. Please check the vpc_subnet_id and"
+                " security_groups parameters."
) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) - parent_vpc_id = sub['Subnets'][0]['VpcId'] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Error while searching for subnet {subnet_id} parent VPC.") + parent_vpc_id = sub["Subnets"][0]["VpcId"] if group: return get_ec2_security_group_ids_from_names(group, client, vpc_id=parent_vpc_id) @@ -1222,9 +1338,9 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None): def build_userdata(params): - if params.get('user_data') is not None: - return {'UserData': to_native(params.get('user_data'))} - if params.get('aap_callback'): + if params.get("user_data") is not None: + return {"UserData": to_native(params.get("user_data"))} + if params.get("aap_callback"): userdata = tower_callback_script( tower_address=params.get("aap_callback").get("tower_address"), job_template_id=params.get("aap_callback").get("job_template_id"), @@ -1232,109 +1348,125 @@ def build_userdata(params): windows=params.get("aap_callback").get("windows"), passwd=params.get("aap_callback").get("set_password"), ) - return {'UserData': userdata} + return {"UserData": userdata} return {} def build_top_level_options(params): spec = {} - if params.get('image_id'): - spec['ImageId'] = params['image_id'] - elif isinstance(params.get('image'), dict): - image = params.get('image', {}) - spec['ImageId'] = image.get('id') - if 'ramdisk' in image: - spec['RamdiskId'] = image['ramdisk'] - if 'kernel' in image: - spec['KernelId'] = image['kernel'] - if not spec.get('ImageId') and not params.get('launch_template'): - module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.") - - if params.get('key_name') is not None: - spec['KeyName'] = params.get('key_name') + if params.get("image_id"): + spec["ImageId"] = params["image_id"] + elif isinstance(params.get("image"), dict): + image = params.get("image", {}) + spec["ImageId"] = image.get("id") + if "ramdisk" in image: + spec["RamdiskId"] = image["ramdisk"] + if "kernel" in image: + spec["KernelId"] = image["kernel"] + if not spec.get("ImageId") and not params.get("launch_template"): + module.fail_json( + msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template." + ) + + if params.get("key_name") is not None: + spec["KeyName"] = params.get("key_name") spec.update(build_userdata(params)) - if params.get('launch_template') is not None: - spec['LaunchTemplate'] = {} - if not params.get('launch_template').get('id') and not params.get('launch_template').get('name'): - module.fail_json(msg="Could not create instance with launch template. 
Either launch_template.name or launch_template.id parameters are required") - - if params.get('launch_template').get('id') is not None: - spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id') - if params.get('launch_template').get('name') is not None: - spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name') - if params.get('launch_template').get('version') is not None: - spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version')) - - if params.get('detailed_monitoring', False): - spec['Monitoring'] = {'Enabled': True} - if params.get('cpu_credit_specification') is not None: - spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')} - if params.get('tenancy') is not None: - spec['Placement'] = {'Tenancy': params.get('tenancy')} - if params.get('placement_group'): - if 'Placement' in spec: - spec['Placement']['GroupName'] = str(params.get('placement_group')) + if params.get("launch_template") is not None: + spec["LaunchTemplate"] = {} + if not params.get("launch_template").get("id") and not params.get("launch_template").get("name"): + module.fail_json( + msg=( + "Could not create instance with launch template. Either launch_template.name or launch_template.id" + " parameters are required" + ) + ) + + if params.get("launch_template").get("id") is not None: + spec["LaunchTemplate"]["LaunchTemplateId"] = params.get("launch_template").get("id") + if params.get("launch_template").get("name") is not None: + spec["LaunchTemplate"]["LaunchTemplateName"] = params.get("launch_template").get("name") + if params.get("launch_template").get("version") is not None: + spec["LaunchTemplate"]["Version"] = to_native(params.get("launch_template").get("version")) + + if params.get("detailed_monitoring", False): + spec["Monitoring"] = {"Enabled": True} + if params.get("cpu_credit_specification") is not None: + spec["CreditSpecification"] = {"CpuCredits": params.get("cpu_credit_specification")} + if params.get("tenancy") is not None: + spec["Placement"] = {"Tenancy": params.get("tenancy")} + if params.get("placement_group"): + if "Placement" in spec: + spec["Placement"]["GroupName"] = str(params.get("placement_group")) else: - spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))}) - if params.get('ebs_optimized') is not None: - spec['EbsOptimized'] = params.get('ebs_optimized') - if params.get('instance_initiated_shutdown_behavior'): - spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior') - if params.get('termination_protection') is not None: - spec['DisableApiTermination'] = params.get('termination_protection') - if params.get('hibernation_options') and params.get('volumes'): - for vol in params['volumes']: - if vol.get('ebs') and vol['ebs'].get('encrypted'): - spec['HibernationOptions'] = {'Configured': True} + spec.setdefault("Placement", {"GroupName": str(params.get("placement_group"))}) + if params.get("placement") is not None: + spec["Placement"] = {} + if params.get("placement").get("availability_zone") is not None: + spec["Placement"]["AvailabilityZone"] = params.get("placement").get("availability_zone") + if params.get("placement").get("affinity") is not None: + spec["Placement"]["Affinity"] = params.get("placement").get("affinity") + if params.get("placement").get("group_name") is not None: + spec["Placement"]["GroupName"] = params.get("placement").get("group_name") + if params.get("placement").get("host_id") is not 
None: + spec["Placement"]["HostId"] = params.get("placement").get("host_id") + if params.get("placement").get("host_resource_group_arn") is not None: + spec["Placement"]["HostResourceGroupArn"] = params.get("placement").get("host_resource_group_arn") + if params.get("placement").get("partition_number") is not None: + spec["Placement"]["PartitionNumber"] = params.get("placement").get("partition_number") + if params.get("placement").get("tenancy") is not None: + spec["Placement"]["Tenancy"] = params.get("placement").get("tenancy") + if params.get("ebs_optimized") is not None: + spec["EbsOptimized"] = params.get("ebs_optimized") + if params.get("instance_initiated_shutdown_behavior"): + spec["InstanceInitiatedShutdownBehavior"] = params.get("instance_initiated_shutdown_behavior") + if params.get("termination_protection") is not None: + spec["DisableApiTermination"] = params.get("termination_protection") + if params.get("hibernation_options") and params.get("volumes"): + for vol in params["volumes"]: + if vol.get("ebs") and vol["ebs"].get("encrypted"): + spec["HibernationOptions"] = {"Configured": True} else: module.fail_json( - msg="Hibernation prerequisites not satisfied. Refer {0}".format( - "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html") + msg=( + "Hibernation prerequisites not satisfied. Refer to" + " https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html" + ) ) - if params.get('cpu_options') is not None: - spec['CpuOptions'] = {} - spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core') - spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count') - if params.get('metadata_options'): - spec['MetadataOptions'] = {} - spec['MetadataOptions']['HttpEndpoint'] = params.get( - 'metadata_options').get('http_endpoint') - spec['MetadataOptions']['HttpTokens'] = params.get( - 'metadata_options').get('http_tokens') - spec['MetadataOptions']['HttpPutResponseHopLimit'] = params.get( - 'metadata_options').get('http_put_response_hop_limit') - - if not module.botocore_at_least('1.23.30'): - # fail only if enabled is requested - if params.get('metadata_options').get('instance_metadata_tags') == 'enabled': - module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') - else: - spec['MetadataOptions']['InstanceMetadataTags'] = params.get( - 'metadata_options').get('instance_metadata_tags') - - if not module.botocore_at_least('1.21.29'): - # fail only if enabled is requested - if params.get('metadata_options').get('http_protocol_ipv6') == 'enabled': - module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') - else: - spec['MetadataOptions']['HttpProtocolIpv6'] = params.get( - 'metadata_options').get('http_protocol_ipv6') - + if params.get("cpu_options") is not None: + spec["CpuOptions"] = {} + spec["CpuOptions"]["ThreadsPerCore"] = params.get("cpu_options").get("threads_per_core") + spec["CpuOptions"]["CoreCount"] = params.get("cpu_options").get("core_count") + if params.get("metadata_options"): + spec["MetadataOptions"] = {} + spec["MetadataOptions"]["HttpEndpoint"] = params.get("metadata_options").get("http_endpoint") + spec["MetadataOptions"]["HttpTokens"] = params.get("metadata_options").get("http_tokens") + spec["MetadataOptions"]["HttpPutResponseHopLimit"] = params.get("metadata_options").get( + "http_put_response_hop_limit" + ) + spec["MetadataOptions"]["HttpProtocolIpv6"] = params.get("metadata_options").get("http_protocol_ipv6") + 
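# Editor's sketch (not part of the upstream diff): the chain of
# `if params.get("placement").get(...)` guards above maps the new placement
# suboptions onto the EC2 Placement structure one key at a time. The same
# translation as a hypothetical table-driven helper:
_PLACEMENT_KEYS = {
    "availability_zone": "AvailabilityZone",
    "affinity": "Affinity",
    "group_name": "GroupName",
    "host_id": "HostId",
    "host_resource_group_arn": "HostResourceGroupArn",
    "partition_number": "PartitionNumber",
    "tenancy": "Tenancy",
}

def build_placement(placement):
    """Build the RunInstances Placement dict, skipping unset suboptions."""
    return {
        boto_key: placement[key]
        for key, boto_key in _PLACEMENT_KEYS.items()
        if placement.get(key) is not None
    }

# build_placement({"availability_zone": "us-east-1a", "tenancy": "dedicated"})
# returns {"AvailabilityZone": "us-east-1a", "Tenancy": "dedicated"}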
spec["MetadataOptions"]["InstanceMetadataTags"] = params.get("metadata_options").get("instance_metadata_tags") + if params.get("additional_info"): + spec["AdditionalInfo"] = params.get("additional_info") + if params.get("license_specifications"): + spec["LicenseSpecifications"] = [] + for license_configuration in params.get("license_specifications"): + spec["LicenseSpecifications"].append( + {"LicenseConfigurationArn": license_configuration.get("license_configuration_arn")} + ) return spec def build_instance_tags(params, propagate_tags_to_volumes=True): - tags = params.get('tags') or {} - if params.get('name') is not None: - tags['Name'] = params.get('name') - specs = boto3_tag_specifications(tags, ['volume', 'instance']) + tags = params.get("tags") or {} + if params.get("name") is not None: + tags["Name"] = params.get("name") + specs = boto3_tag_specifications(tags, ["volume", "instance"]) return specs -def build_run_instance_spec(params): - +def build_run_instance_spec(params, current_count=0): spec = dict( ClientToken=uuid.uuid4().hex, MaxCount=1, @@ -1342,36 +1474,38 @@ def build_run_instance_spec(params): ) spec.update(**build_top_level_options(params)) - spec['NetworkInterfaces'] = build_network_spec(params) - spec['BlockDeviceMappings'] = build_volume_spec(params) + spec["NetworkInterfaces"] = build_network_spec(params) + spec["BlockDeviceMappings"] = build_volume_spec(params) tag_spec = build_instance_tags(params) if tag_spec is not None: - spec['TagSpecifications'] = tag_spec + spec["TagSpecifications"] = tag_spec # IAM profile - if params.get('iam_instance_profile'): - spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('iam_instance_profile'))) + if params.get("iam_instance_profile"): + spec["IamInstanceProfile"] = dict(Arn=determine_iam_role(params.get("iam_instance_profile"))) - if params.get('exact_count'): - spec['MaxCount'] = params.get('to_launch') - spec['MinCount'] = params.get('to_launch') + if params.get("exact_count"): + spec["MaxCount"] = params.get("exact_count") - current_count + spec["MinCount"] = params.get("exact_count") - current_count - if params.get('count'): - spec['MaxCount'] = params.get('count') - spec['MinCount'] = params.get('count') + if params.get("count"): + spec["MaxCount"] = params.get("count") + spec["MinCount"] = params.get("count") - if not params.get('launch_template'): - spec['InstanceType'] = params['instance_type'] if params.get('instance_type') else 't2.micro' + if params.get("instance_type"): + spec["InstanceType"] = params["instance_type"] - if params.get('launch_template') and params.get('instance_type'): - spec['InstanceType'] = params['instance_type'] + if not (params.get("instance_type") or params.get("launch_template")): + raise Ec2InstanceAWSError( + "At least one of 'instance_type' and 'launch_template' must be passed when launching instances." 
+ ) return spec -def await_instances(ids, desired_module_state='present', force_wait=False): - if not module.params.get('wait', True) and not force_wait: +def await_instances(ids, desired_module_state="present", force_wait=False): + if not module.params.get("wait", True) and not force_wait: # the user asked not to wait for anything return @@ -1381,33 +1515,35 @@ def await_instances(ids, desired_module_state='present', force_wait=False): # Map ansible state to boto3 waiter type state_to_boto3_waiter = { - 'present': 'instance_exists', - 'started': 'instance_status_ok', - 'running': 'instance_running', - 'stopped': 'instance_stopped', - 'restarted': 'instance_status_ok', - 'rebooted': 'instance_running', - 'terminated': 'instance_terminated', - 'absent': 'instance_terminated', + "present": "instance_exists", + "started": "instance_status_ok", + "running": "instance_running", + "stopped": "instance_stopped", + "restarted": "instance_status_ok", + "rebooted": "instance_running", + "terminated": "instance_terminated", + "absent": "instance_terminated", } if desired_module_state not in state_to_boto3_waiter: - module.fail_json(msg="Cannot wait for state {0}, invalid state".format(desired_module_state)) + module.fail_json(msg=f"Cannot wait for state {desired_module_state}, invalid state") boto3_waiter_type = state_to_boto3_waiter[desired_module_state] waiter = client.get_waiter(boto3_waiter_type) try: waiter.wait( InstanceIds=ids, WaiterConfig={ - 'Delay': 15, - 'MaxAttempts': module.params.get('wait_timeout', 600) // 15, - } + "Delay": 15, + "MaxAttempts": module.params.get("wait_timeout", 600) // 15, + }, ) except botocore.exceptions.WaiterConfigError as e: - module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format( - to_native(e), ', '.join(ids), boto3_waiter_type)) + instance_ids = ", ".join(ids) + module.fail_json( + msg=f"{to_native(e)}. Error waiting for instances {instance_ids} to reach state {boto3_waiter_type}" + ) except botocore.exceptions.WaiterError as e: - module.warn("Instances {0} took too long to reach state {1}. {2}".format( - ', '.join(ids), boto3_waiter_type, to_native(e))) + instance_ids = ", ".join(ids) + module.warn(f"Instances {instance_ids} took too long to reach state {boto3_waiter_type}. 
{to_native(e)}") def diff_instance_and_params(instance, params, skip=None): @@ -1417,16 +1553,16 @@ def diff_instance_and_params(instance, params, skip=None): skip = [] changes_to_apply = [] - id_ = instance['InstanceId'] + id_ = instance["InstanceId"] - ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value']) + ParamMapper = namedtuple("ParamMapper", ["param_key", "instance_key", "attribute_name", "add_value"]) def value_wrapper(v): - return {'Value': v} + return {"Value": v} param_mappings = [ - ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper), - ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper), + ParamMapper("ebs_optimized", "EbsOptimized", "ebsOptimized", value_wrapper), + ParamMapper("termination_protection", "DisableApiTermination", "disableApiTermination", value_wrapper), # user data is an immutable property # ParamMapper('user_data', 'UserData', 'userData', value_wrapper), ] @@ -1440,67 +1576,110 @@ def diff_instance_and_params(instance, params, skip=None): try: value = client.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_)) - if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): + module.fail_json_aws(e, msg=f"Could not describe attribute {mapping.attribute_name} for instance {id_}") + if value[mapping.instance_key]["Value"] != params.get(mapping.param_key): arguments = dict( - InstanceId=instance['InstanceId'], + InstanceId=instance["InstanceId"], # Attribute=mapping.attribute_name, ) arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) changes_to_apply.append(arguments) - if params.get('security_group') or params.get('security_groups'): + if params.get("security_group") or params.get("security_groups"): try: value = client.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) + module.fail_json_aws(e, msg=f"Could not describe attribute groupSet for instance {id_}") # managing security groups - if params.get('vpc_subnet_id'): - subnet_id = params.get('vpc_subnet_id') + if params.get("vpc_subnet_id"): + subnet_id = params.get("vpc_subnet_id") else: default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.") + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to modify security groups." 
+ ) + ) else: sub = get_default_subnet(default_vpc) - subnet_id = sub['SubnetId'] + subnet_id = sub["SubnetId"] groups = discover_security_groups( - group=params.get('security_group'), - groups=params.get('security_groups'), + group=params.get("security_group"), + groups=params.get("security_groups"), subnet_id=subnet_id, ) expected_groups = groups - instance_groups = [g['GroupId'] for g in value['Groups']] + instance_groups = [g["GroupId"] for g in value["Groups"]] if set(instance_groups) != set(expected_groups): - changes_to_apply.append(dict( - Groups=expected_groups, - InstanceId=instance['InstanceId'] - )) + changes_to_apply.append(dict(Groups=expected_groups, InstanceId=instance["InstanceId"])) - if (params.get('network') or {}).get('source_dest_check') is not None: + if (params.get("network") or {}).get("source_dest_check") is not None: # network.source_dest_check is nested, so needs to be treated separately - check = bool(params.get('network').get('source_dest_check')) - if instance['SourceDestCheck'] != check: - changes_to_apply.append(dict( - InstanceId=instance['InstanceId'], - SourceDestCheck={'Value': check}, - )) + check = bool(params.get("network").get("source_dest_check")) + if instance["SourceDestCheck"] != check: + changes_to_apply.append( + dict( + InstanceId=instance["InstanceId"], + SourceDestCheck={"Value": check}, + ) + ) return changes_to_apply +def change_instance_metadata_options(instance, params): + metadata_options_to_apply = params.get("metadata_options") + + if metadata_options_to_apply is None: + return False + + existing_metadata_options = camel_dict_to_snake_dict(instance.get("MetadataOptions")) + + changes_to_apply = { + key: metadata_options_to_apply[key] + for key in set(existing_metadata_options) & set(metadata_options_to_apply) + if existing_metadata_options[key] != metadata_options_to_apply[key] + } + + if not changes_to_apply: + return False + + request_args = { + "InstanceId": instance["InstanceId"], + "HttpTokens": changes_to_apply.get("http_tokens") or existing_metadata_options.get("http_tokens"), + "HttpPutResponseHopLimit": changes_to_apply.get("http_put_response_hop_limit") + or existing_metadata_options.get("http_put_response_hop_limit"), + "HttpEndpoint": changes_to_apply.get("http_endpoint") or existing_metadata_options.get("http_endpoint"), + "HttpProtocolIpv6": changes_to_apply.get("http_protocol_ipv6") + or existing_metadata_options.get("http_protocol_ipv6"), + "InstanceMetadataTags": changes_to_apply.get("instance_metadata_tags") + or existing_metadata_options.get("instance_metadata_tags"), + } + + if module.check_mode: + return True + try: + client.modify_instance_metadata_options(aws_retry=True, **request_args) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Failed to update instance metadata options for instance ID: {instance['InstanceId']}" + ) + return True + + def change_network_attachments(instance, params): - if (params.get('network') or {}).get('interfaces') is not None: + if (params.get("network") or {}).get("interfaces") is not None: new_ids = [] - for inty in params.get('network').get('interfaces'): - if isinstance(inty, dict) and 'id' in inty: - new_ids.append(inty['id']) + for inty in params.get("network").get("interfaces"): + if isinstance(inty, dict) and "id" in inty: + new_ids.append(inty["id"]) elif isinstance(inty, string_types): new_ids.append(inty) # network.interfaces can create the need to attach new interfaces - old_ids = 
[inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] + old_ids = [inty["NetworkInterfaceId"] for inty in instance["NetworkInterfaces"]] to_attach = set(new_ids) - set(old_ids) if not module.check_mode: for eni_id in to_attach: @@ -1545,19 +1724,17 @@ def find_instances(ids=None, filters=None): @AWSRetry.jittered_backoff() def _describe_instances(**params): - paginator = client.get_paginator('describe_instances') - return paginator.paginate(**params).search('Reservations[].Instances[]') + paginator = client.get_paginator("describe_instances") + return paginator.paginate(**params).search("Reservations[].Instances[]") def get_default_vpc(): try: - vpcs = client.describe_vpcs( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + vpcs = client.describe_vpcs(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list({"isDefault": "true"})) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not describe default VPC") - if len(vpcs.get('Vpcs', [])): - return vpcs.get('Vpcs')[0] + if len(vpcs.get("Vpcs", [])): + return vpcs.get("Vpcs")[0] return None @@ -1565,46 +1742,50 @@ def get_default_subnet(vpc, availability_zone=None): try: subnets = client.describe_subnets( aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({ - 'vpc-id': vpc['VpcId'], - 'state': 'available', - 'default-for-az': 'true', - }) + Filters=ansible_dict_to_boto3_filter_list( + { + "vpc-id": vpc["VpcId"], + "state": "available", + "default-for-az": "true", + } + ), ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId'])) - if len(subnets.get('Subnets', [])): + module.fail_json_aws(e, msg=f"Could not describe default subnets for VPC {vpc['VpcId']}") + if len(subnets.get("Subnets", [])): if availability_zone is not None: - subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) + subs_by_az = dict((subnet["AvailabilityZone"], subnet) for subnet in subnets.get("Subnets")) if availability_zone in subs_by_az: return subs_by_az[availability_zone] # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list - by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) + by_az = sorted(subnets.get("Subnets"), key=lambda s: s["AvailabilityZone"]) return by_az[0] return None -def ensure_instance_state(desired_module_state): +def ensure_instance_state(desired_module_state, filters): """ Sets return keys depending on the desired instance state """ results = dict() changed = False - if desired_module_state in ('running', 'started'): + if desired_module_state in ("running", "started"): _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), desired_module_state=desired_module_state) + filters=filters, desired_module_state=desired_module_state + ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to start instances: {0}".format(failure_reason), + msg=f"Unable to start instances: {failure_reason}", reboot_success=list(_changed), - reboot_failed=failed) + reboot_failed=failed, + ) results = dict( - msg='Instances started', + msg="Instances started", start_success=list(_changed), start_failed=[], # Avoid breaking things 'reboot' is wrong 
but used to be returned @@ -1613,74 +1794,78 @@ def ensure_instance_state(desired_module_state): changed=changed, instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('restarted', 'rebooted'): + elif desired_module_state in ("restarted", "rebooted"): # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-reboot.html # The Ansible behaviour of issuing a stop/start has a minor impact on user billing # This will need to be changelogged if we ever change to client.reboot_instance _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_module_state='stopped', + filters=filters, + desired_module_state="stopped", ) if failed: module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), - stop_failed=failed) + stop_failed=failed, + ) changed |= bool(len(_changed)) _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to restart instances: {0}".format(failure_reason), + msg=f"Unable to restart instances: {failure_reason}", reboot_success=list(_changed), - reboot_failed=failed) + reboot_failed=failed, + ) results = dict( - msg='Instances restarted', + msg="Instances restarted", reboot_success=list(_changed), changed=changed, reboot_failed=[], instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('stopped',): + elif desired_module_state in ("stopped",): _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), - stop_failed=failed) + stop_failed=failed, + ) results = dict( - msg='Instances stopped', + msg="Instances stopped", stop_success=list(_changed), changed=changed, stop_failed=[], instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('absent', 'terminated'): + elif desired_module_state in ("absent", "terminated"): terminated, terminate_failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) if terminate_failed: module.fail_json( - msg="Unable to terminate instances: {0}".format(failure_reason), + msg=f"Unable to terminate instances: {failure_reason}", terminate_success=list(terminated), - terminate_failed=terminate_failed) + terminate_failed=terminate_failed, + ) results = dict( - msg='Instances terminated', + msg="Instances terminated", terminate_success=list(terminated), changed=bool(len(terminated)), terminate_failed=[], @@ -1690,71 +1875,70 @@ def ensure_instance_state(desired_module_state): def change_instance_state(filters, desired_module_state): - # Map ansible state to ec2 state ec2_instance_states = { - 'present': 'running', - 'started': 'running', - 'running': 'running', - 'stopped': 'stopped', - 'restarted': 'running', - 'rebooted': 'running', - 'terminated': 'terminated', - 'absent': 'terminated', + "present": "running", + "started": "running", + "running": "running", + "stopped": "stopped", + "restarted": 
"running", + "rebooted": "running", + "terminated": "terminated", + "absent": "terminated", } desired_ec2_state = ec2_instance_states[desired_module_state] changed = set() instances = find_instances(filters=filters) - to_change = set(i['InstanceId'] for i in instances if i['State']['Name'] != desired_ec2_state) + to_change = set(i["InstanceId"] for i in instances if i["State"]["Name"] != desired_ec2_state) unchanged = set() failure_reason = "" for inst in instances: try: - if desired_ec2_state == 'terminated': + if desired_ec2_state == "terminated": # Before terminating an instance we need for them to leave # 'pending' or 'stopping' (if they're in those states) - if inst['State']['Name'] == 'stopping': - await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True) - elif inst['State']['Name'] == 'pending': - await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True) + if inst["State"]["Name"] == "stopping": + await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True) + elif inst["State"]["Name"] == "pending": + await_instances([inst["InstanceId"]], desired_module_state="running", force_wait=True) if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue # TODO use a client-token to prevent double-sends of these start/stop/terminate commands # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html - resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']] - if desired_ec2_state == 'stopped': + resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["TerminatingInstances"]] + if desired_ec2_state == "stopped": # Before stopping an instance we need for them to leave # 'pending' - if inst['State']['Name'] == 'pending': - await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True) + if inst["State"]["Name"] == "pending": + await_instances([inst["InstanceId"]], desired_module_state="running", force_wait=True) # Already moving to the relevant state - elif inst['State']['Name'] in ('stopping', 'stopped'): - unchanged.add(inst['InstanceId']) + elif inst["State"]["Name"] in ("stopping", "stopped"): + unchanged.add(inst["InstanceId"]) continue if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue - resp = client.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['StoppingInstances']] - if desired_ec2_state == 'running': - if inst['State']['Name'] in ('pending', 'running'): - unchanged.add(inst['InstanceId']) + resp = client.stop_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["StoppingInstances"]] + if desired_ec2_state == "running": + if inst["State"]["Name"] in ("pending", "running"): + unchanged.add(inst["InstanceId"]) continue - elif inst['State']['Name'] == 'stopping': - await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True) + elif inst["State"]["Name"] == "stopping": + await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True) if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue - resp = client.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - 
[changed.add(i['InstanceId']) for i in resp['StartingInstances']] + resp = client.start_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["StartingInstances"]] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: try: failure_reason = to_native(e.message) @@ -1767,34 +1951,39 @@ def change_instance_state(filters, desired_module_state): change_failed = list(to_change - changed) if instances: - instances = find_instances(ids=list(i['InstanceId'] for i in instances)) + instances = find_instances(ids=list(i["InstanceId"] for i in instances)) return changed, change_failed, instances, failure_reason def pretty_instance(i): - instance = camel_dict_to_snake_dict(i, ignore_list=['Tags']) - instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {})) + instance = camel_dict_to_snake_dict(i, ignore_list=["Tags"]) + instance["tags"] = boto3_tag_list_to_ansible_dict(i.get("Tags", {})) return instance def determine_iam_role(name_or_arn): - result = parse_aws_arn(name_or_arn) - if result and result['service'] == 'iam' and result['resource'].startswith('instance-profile/'): + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): return name_or_arn - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return role['InstanceProfile']['Arn'] - except is_boto3_error_code('NoSuchEntity') as e: - module.fail_json_aws(e, msg="Could not find iam_instance_profile {0}".format(name_or_arn)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for iam_instance_profile {0}. Please try supplying the full ARN.".format(name_or_arn)) + return role["InstanceProfile"]["Arn"] + except is_boto3_error_code("NoSuchEntity") as e: + module.fail_json_aws(e, msg=f"Could not find iam_instance_profile {name_or_arn}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg=f"An error occurred while searching for iam_instance_profile {name_or_arn}. 
Please try supplying the full ARN.", + ) -def handle_existing(existing_matches, state): - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - name = module.params.get('name') +def handle_existing(existing_matches, state, filters): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + name = module.params.get("name") # Name is a tag rather than a direct parameter, we need to inject 'Name' # into tags, but since tags isn't explicitly passed we'll treat it not being @@ -1803,42 +1992,45 @@ def handle_existing(existing_matches, state): if tags is None: purge_tags = False tags = {} - tags.update({'Name': name}) + tags.update({"Name": name}) changed = False all_changes = list() for instance in existing_matches: - changed |= ensure_ec2_tags(client, module, instance['InstanceId'], tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags(client, module, instance["InstanceId"], tags=tags, purge_tags=purge_tags) + + changed |= change_instance_metadata_options(instance, module.params) + changes = diff_instance_and_params(instance, module.params) for c in changes: if not module.check_mode: try: client.modify_instance_attribute(aws_retry=True, **c) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to existing instance.") all_changes.extend(changes) changed |= bool(changes) - changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('iam_instance_profile')) + changed |= add_or_update_instance_profile(existing_matches[0], module.params.get("iam_instance_profile")) changed |= change_network_attachments(existing_matches[0], module.params) - altered = find_instances(ids=[i['InstanceId'] for i in existing_matches]) + altered = find_instances(ids=[i["InstanceId"] for i in existing_matches]) alter_config_result = dict( changed=changed, instances=[pretty_instance(i) for i in altered], - instance_ids=[i['InstanceId'] for i in altered], + instance_ids=[i["InstanceId"] for i in altered], changes=changes, ) - state_results = ensure_instance_state(state) - alter_config_result['changed'] |= state_results.pop('changed', False) + state_results = ensure_instance_state(state, filters) + alter_config_result["changed"] |= state_results.pop("changed", False) result = {**state_results, **alter_config_result} return result def enforce_count(existing_matches, module, desired_module_state): - exact_count = module.params.get('exact_count') + exact_count = module.params.get("exact_count") try: current_count = len(existing_matches) @@ -1851,19 +2043,21 @@ def enforce_count(existing_matches, module, desired_module_state): ) elif current_count < exact_count: - to_launch = exact_count - current_count - module.params['to_launch'] = to_launch # launch instances try: - ensure_present(existing_matches=existing_matches, desired_module_state=desired_module_state) + ensure_present( + existing_matches=existing_matches, + desired_module_state=desired_module_state, + current_count=current_count, + ) except botocore.exceptions.ClientError as e: - module.fail_json(e, msg='Unable to launch instances') + module.fail_json(e, msg="Unable to launch instances") elif current_count > exact_count: to_terminate = current_count - exact_count # sort the instances from least recent to most recent based on launch time - existing_matches = sorted(existing_matches, key=lambda inst: 
inst['LaunchTime'])
+        existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
         # get the instance ids of instances with the count tag on them
-        all_instance_ids = [x['InstanceId'] for x in existing_matches]
+        all_instance_ids = [x["InstanceId"] for x in existing_matches]
         terminate_ids = all_instance_ids[0:to_terminate]
         if module.check_mode:
             module.exit_json(
@@ -1875,16 +2069,16 @@ def enforce_count(existing_matches, module, desired_module_state):
             # terminate instances
             try:
                 client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
-                await_instances(terminate_ids, desired_module_state='terminated', force_wait=True)
-            except is_boto3_error_code('InvalidInstanceID.NotFound'):
+                await_instances(terminate_ids, desired_module_state="terminated", force_wait=True)
+            except is_boto3_error_code("InvalidInstanceID.NotFound"):
                 pass
             except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
-                module.fail_json(e, msg='Unable to terminate instances')
+                module.fail_json(e, msg="Unable to terminate instances")
             # include data for all matched instances in addition to the list of terminations
             # allowing for recovery of metadata from the destructive operation
             module.exit_json(
                 changed=True,
-                msg='Successfully terminated instances.',
+                msg="Successfully terminated instances.",
                 terminated_ids=terminate_ids,
                 instance_ids=all_instance_ids,
                 instances=existing_matches,
@@ -1894,14 +2088,14 @@ def enforce_count(existing_matches, module, desired_module_state):
         module.fail_json_aws(e, msg="Failed to enforce instance count")


-def ensure_present(existing_matches, desired_module_state):
-    tags = dict(module.params.get('tags') or {})
-    name = module.params.get('name')
+def ensure_present(existing_matches, desired_module_state, current_count=None):
+    tags = dict(module.params.get("tags") or {})
+    name = module.params.get("name")
     if name:
-        tags['Name'] = name
+        tags["Name"] = name

     try:
-        instance_spec = build_run_instance_spec(module.params)
+        instance_spec = build_run_instance_spec(module.params, current_count)
         # If check mode is enabled, suspend the 'ensure' function.
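# Editor's sketch (not part of the upstream diff): with the new current_count
# argument, build_run_instance_spec() sizes the RunInstances call so that
# exact_count launches only the shortfall:
def counts_for_launch(exact_count, current_count=0):
    """MinCount/MaxCount passed to RunInstances when enforcing exact_count."""
    to_launch = exact_count - current_count
    return {"MinCount": to_launch, "MaxCount": to_launch}

# Three matching instances already exist and exact_count is 5, so exactly two
# more are launched: counts_for_launch(5, 3) == {"MinCount": 2, "MaxCount": 2}.
# The check-mode guard below then decides whether the spec is actually sent.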
if module.check_mode: if existing_matches: @@ -1920,32 +2114,30 @@ def ensure_present(existing_matches, desired_module_state): msg="Would have launched instances if not in check_mode.", ) instance_response = run_instances(**instance_spec) - instances = instance_response['Instances'] - instance_ids = [i['InstanceId'] for i in instances] + instances = instance_response["Instances"] + instance_ids = [i["InstanceId"] for i in instances] # Wait for instances to exist in the EC2 API before # attempting to modify them - await_instances(instance_ids, desired_module_state='present', force_wait=True) + await_instances(instance_ids, desired_module_state="present", force_wait=True) for ins in instances: # Wait for instances to exist (don't check state) try: AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidInstanceID.NotFound'], - )( - client.describe_instance_status - )( - InstanceIds=[ins['InstanceId']], + catch_extra_error_codes=["InvalidInstanceID.NotFound"], + )(client.describe_instance_status)( + InstanceIds=[ins["InstanceId"]], IncludeAllInstances=True, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance") - changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized']) + changes = diff_instance_and_params(ins, module.params, skip=["UserData", "EbsOptimized"]) for c in changes: try: client.modify_instance_attribute(aws_retry=True, **c) except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to new instance.") if existing_matches: # If we came from enforce_count, create a second list to distinguish # between existing and new instances when returning the entire cohort @@ -1990,7 +2182,7 @@ def ensure_present(existing_matches, desired_module_state): def run_instances(**instance_spec): try: return client.run_instances(aws_retry=True, **instance_spec) - except is_boto3_error_message('Invalid IAM Instance Profile ARN'): + except is_boto3_error_message("Invalid IAM Instance Profile ARN"): # If the instance profile has just been created, it takes some time to be visible by ec2 # So we wait 10 second and retry the run_instances time.sleep(10) @@ -2000,40 +2192,40 @@ def run_instances(**instance_spec): def build_filters(): filters = { # all states except shutting-down and terminated - 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'], + "instance-state-name": ["pending", "running", "stopping", "stopped"], } - if isinstance(module.params.get('instance_ids'), string_types): - filters['instance-id'] = [module.params.get('instance_ids')] - elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')): - filters['instance-id'] = module.params.get('instance_ids') + if isinstance(module.params.get("instance_ids"), string_types): + filters["instance-id"] = [module.params.get("instance_ids")] + elif isinstance(module.params.get("instance_ids"), list) and len(module.params.get("instance_ids")): + filters["instance-id"] = module.params.get("instance_ids") else: - if not module.params.get('vpc_subnet_id'): - if module.params.get('network'): + if not module.params.get("vpc_subnet_id"): + if module.params.get("network"): # grab AZ from one of the ENIs - ints = module.params.get('network').get('interfaces') + ints = module.params.get("network").get("interfaces") if ints: - 
filters['network-interface.network-interface-id'] = [] + filters["network-interface.network-interface-id"] = [] for i in ints: if isinstance(i, dict): - i = i['id'] - filters['network-interface.network-interface-id'].append(i) + i = i["id"] + filters["network-interface.network-interface-id"].append(i) else: - sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get('availability_zone')) - filters['subnet-id'] = sub['SubnetId'] + sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get("availability_zone")) + filters["subnet-id"] = sub["SubnetId"] else: - filters['subnet-id'] = [module.params.get('vpc_subnet_id')] + filters["subnet-id"] = [module.params.get("vpc_subnet_id")] - if module.params.get('name'): - filters['tag:Name'] = [module.params.get('name')] - elif module.params.get('tags'): - name_tag = module.params.get('tags').get('Name', None) + if module.params.get("name"): + filters["tag:Name"] = [module.params.get("name")] + elif module.params.get("tags"): + name_tag = module.params.get("tags").get("Name", None) if name_tag: - filters['tag:Name'] = [name_tag] + filters["tag:Name"] = [name_tag] - if module.params.get('image_id'): - filters['image-id'] = [module.params.get('image_id')] - elif (module.params.get('image') or {}).get('id'): - filters['image-id'] = [module.params.get('image', {}).get('id')] + if module.params.get("image_id"): + filters["image-id"] = [module.params.get("image_id")] + elif (module.params.get("image") or {}).get("id"): + filters["image-id"] = [module.params.get("image", {}).get("id")] return filters @@ -2042,129 +2234,183 @@ def main(): global client argument_spec = dict( - state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']), - wait=dict(default=True, type='bool'), - wait_timeout=dict(default=600, type='int'), - count=dict(type='int'), - exact_count=dict(type='int'), - image=dict(type='dict'), - image_id=dict(type='str'), - instance_type=dict(type='str'), - user_data=dict(type='str'), + state=dict( + default="present", + choices=["present", "started", "running", "stopped", "restarted", "rebooted", "terminated", "absent"], + ), + wait=dict(default=True, type="bool"), + wait_timeout=dict(default=600, type="int"), + count=dict(type="int"), + exact_count=dict(type="int"), + image=dict(type="dict"), + image_id=dict(type="str"), + instance_type=dict(type="str"), + user_data=dict(type="str"), aap_callback=dict( - type='dict', aliases=['tower_callback'], + type="dict", + aliases=["tower_callback"], required_if=[ - ('windows', False, ('tower_address', 'job_template_id', 'host_config_key',), False), + ( + "windows", + False, + ( + "tower_address", + "job_template_id", + "host_config_key", + ), + False, + ), ], options=dict( - windows=dict(type='bool', default=False), - set_password=dict(type='str', no_log=True), - tower_address=dict(type='str'), - job_template_id=dict(type='str'), - host_config_key=dict(type='str', no_log=True), + windows=dict(type="bool", default=False), + set_password=dict(type="str", no_log=True), + tower_address=dict(type="str"), + job_template_id=dict(type="str"), + host_config_key=dict(type="str", no_log=True), + ), + ), + ebs_optimized=dict(type="bool"), + vpc_subnet_id=dict(type="str", aliases=["subnet_id"]), + availability_zone=dict(type="str"), + security_groups=dict(default=[], type="list", elements="str"), + security_group=dict(type="str"), + iam_instance_profile=dict(type="str", aliases=["instance_role"]), + 
name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + filters=dict(type="dict", default=None), + launch_template=dict(type="dict"), + license_specifications=dict( + type="list", + elements="dict", + options=dict( + license_configuration_arn=dict(type="str", required=True), + ), + ), + key_name=dict(type="str"), + cpu_credit_specification=dict(type="str", choices=["standard", "unlimited"]), + cpu_options=dict( + type="dict", + options=dict( + core_count=dict(type="int", required=True), + threads_per_core=dict(type="int", choices=[1, 2], required=True), ), ), - ebs_optimized=dict(type='bool'), - vpc_subnet_id=dict(type='str', aliases=['subnet_id']), - availability_zone=dict(type='str'), - security_groups=dict(default=[], type='list', elements='str'), - security_group=dict(type='str'), - iam_instance_profile=dict(type='str', aliases=['instance_role']), - name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - filters=dict(type='dict', default=None), - launch_template=dict(type='dict'), - key_name=dict(type='str'), - cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']), - cpu_options=dict(type='dict', options=dict( - core_count=dict(type='int', required=True), - threads_per_core=dict(type='int', choices=[1, 2], required=True) - )), - tenancy=dict(type='str', choices=['dedicated', 'default']), - placement_group=dict(type='str'), - instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), - termination_protection=dict(type='bool'), - hibernation_options=dict(type='bool', default=False), - detailed_monitoring=dict(type='bool'), - instance_ids=dict(default=[], type='list', elements='str'), - network=dict(default=None, type='dict'), - volumes=dict(default=None, type='list', elements='dict'), + tenancy=dict(type="str", choices=["dedicated", "default"]), + placement_group=dict(type="str"), + placement=dict( + type="dict", + options=dict( + affinity=dict(type="str"), + availability_zone=dict(type="str"), + group_name=dict(type="str"), + host_id=dict(type="str"), + host_resource_group_arn=dict(type="str"), + partition_number=dict(type="int"), + tenancy=dict(type="str", choices=["dedicated", "default"]), + ), + ), + instance_initiated_shutdown_behavior=dict(type="str", choices=["stop", "terminate"]), + termination_protection=dict(type="bool"), + hibernation_options=dict(type="bool", default=False), + detailed_monitoring=dict(type="bool"), + instance_ids=dict(default=[], type="list", elements="str"), + network=dict(default=None, type="dict"), + volumes=dict(default=None, type="list", elements="dict"), metadata_options=dict( - type='dict', + type="dict", options=dict( - http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), - http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional'), - http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), - instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), - ) + http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), + http_put_response_hop_limit=dict(type="int", default=1), + http_tokens=dict(choices=["optional", "required"], default="optional"), + http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), + instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), + ), ), + 
additional_info=dict(type="str"), ) # running/present are synonyms # as are terminated/absent module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['security_groups', 'security_group'], - ['availability_zone', 'vpc_subnet_id'], - ['aap_callback', 'user_data'], - ['image_id', 'image'], - ['exact_count', 'count'], - ['exact_count', 'instance_ids'], + ["security_groups", "security_group"], + ["availability_zone", "vpc_subnet_id"], + ["aap_callback", "user_data"], + ["image_id", "image"], + ["exact_count", "count"], + ["exact_count", "instance_ids"], + ["tenancy", "placement"], + ["placement_group", "placement"], ], - supports_check_mode=True + supports_check_mode=True, ) - if not module.params.get('instance_type') and not module.params.get('launch_template'): - if module.params.get('state') not in ('absent', 'stopped'): - if module.params.get('count') or module.params.get('exact_count'): - module.deprecate("Default value instance_type has been deprecated, in the future you must set an instance_type or a launch_template", - date='2023-01-01', collection_name='amazon.aws') result = dict() - if module.params.get('network'): - if module.params.get('network').get('interfaces'): - if module.params.get('security_group'): + if module.params.get("network"): + if module.params.get("network").get("interfaces"): + if module.params.get("security_group"): module.fail_json(msg="Parameter network.interfaces can't be used with security_group") - if module.params.get('security_groups'): + if module.params.get("security_groups"): module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") - state = module.params.get('state') + if module.params.get("placement_group"): + module.deprecate( + "The placement_group parameter has been deprecated, please use placement.group_name instead.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + if module.params.get("tenancy"): + module.deprecate( + "The tenancy parameter has been deprecated, please use placement.tenancy instead.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + state = module.params.get("state") retry_decorator = AWSRetry.jittered_backoff( catch_extra_error_codes=[ - 'IncorrectState', - 'InsuffienctInstanceCapacity', + "IncorrectState", + "InsuffienctInstanceCapacity", + "InvalidInstanceID.NotFound", ] ) - client = module.client('ec2', retry_decorator=retry_decorator) + client = module.client("ec2", retry_decorator=retry_decorator) - if module.params.get('filters') is None: - module.params['filters'] = build_filters() + filters = module.params.get("filters") + if filters is None: + filters = build_filters() - existing_matches = find_instances(filters=module.params.get('filters')) + try: + existing_matches = find_instances(filters=filters) - if state in ('terminated', 'absent'): - if existing_matches: - result = ensure_instance_state(state) + if state in ("terminated", "absent"): + if existing_matches: + result = ensure_instance_state(state, filters) + else: + result = dict( + msg="No matching instances found", + changed=False, + ) + elif module.params.get("exact_count"): + enforce_count(existing_matches, module, desired_module_state=state) + elif existing_matches and not module.params.get("count"): + for match in existing_matches: + warn_if_public_ip_assignment_changed(match) + warn_if_cpu_options_changed(match) + result = handle_existing(existing_matches, state, filters=filters) else: - result = dict( - msg='No matching instances found', - changed=False, - ) - elif 
module.params.get('exact_count'): - enforce_count(existing_matches, module, desired_module_state=state) - elif existing_matches and not module.params.get('count'): - for match in existing_matches: - warn_if_public_ip_assignment_changed(match) - warn_if_cpu_options_changed(match) - result = handle_existing(existing_matches, state) - else: - result = ensure_present(existing_matches=existing_matches, desired_module_state=state) + result = ensure_present(existing_matches=existing_matches, desired_module_state=state) + except Ec2InstanceAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py index e1ef2ec41..1caea9365 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_instance_info version_added: 1.0.0 @@ -38,15 +36,38 @@ options: required: false aliases: ['uptime'] type: int - + include_attributes: + description: + - Describes the specified attributes of the returned instances. + required: false + type: list + elements: str + choices: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + - disableApiStop + aliases: ['attributes'] + version_added: 6.3.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all instances @@ -70,7 +91,7 @@ EXAMPLES = r''' - name: Gather information about any instance in states "shutting-down", "stopping", "stopped" amazon.aws.ec2_instance_info: filters: - instance-state-name: [ "shutting-down", "stopping", "stopped" ] + instance-state-name: ["shutting-down", "stopping", "stopped"] - name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes amazon.aws.ec2_instance_info: @@ -78,12 +99,18 @@ EXAMPLES = r''' uptime: 60 filters: "tag:Name": "RHEL-*" - instance-state-name: [ "running"] + instance-state-name: ["running"] register: ec2_node_info -''' +- name: Gather information about a particular instance using ID and include kernel attribute + amazon.aws.ec2_instance_info: + instance_ids: + - i-12345678 + include_attributes: + - kernel +""" -RETURN = r''' +RETURN = r""" instances: description: A list of ec2 instances. returned: always @@ -504,7 +531,21 @@ instances: returned: always type: dict sample: vpc-0011223344 -''' + attributes: + description: The details of the instance attribute specified on input. 
+ returned: when include_attributes is specified + type: dict + sample: + { + 'disable_api_termination': { + 'value': True + }, + 'ebs_optimized': { + 'value': True + } + } + version_added: 6.3.0 +""" import datetime @@ -515,22 +556,21 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def _describe_instances(connection, **params): - paginator = connection.get_paginator('describe_instances') + paginator = connection.get_paginator("describe_instances") return paginator.paginate(**params).build_full_result() def list_ec2_instances(connection, module): - instance_ids = module.params.get("instance_ids") - uptime = module.params.get('minimum_uptime') + uptime = module.params.get("minimum_uptime") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: @@ -544,45 +584,80 @@ def list_ec2_instances(connection, module): timedelta = int(uptime) if uptime else 0 oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta) # Get instances from reservations - for reservation in reservations['Reservations']: - instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time] + for reservation in reservations["Reservations"]: + instances += [ + instance + for instance in reservation["Instances"] + if instance["LaunchTime"].replace(tzinfo=None) < oldest_launch_time + ] else: - for reservation in reservations['Reservations']: - instances = instances + reservation['Instances'] + for reservation in reservations["Reservations"]: + instances = instances + reservation["Instances"] + + # include instance attributes + attributes = module.params.get("include_attributes") + if attributes: + for instance in instances: + instance["attributes"] = describe_instance_attributes(connection, instance["InstanceId"], attributes) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] # Turn the boto3 result in to ansible friendly tag dictionary for instance in snaked_instances: - instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value') + instance["tags"] = boto3_tag_list_to_ansible_dict(instance.get("tags", []), "key", "value") module.exit_json(instances=snaked_instances) -def main(): +def describe_instance_attributes(connection, instance_id, attributes): + result = {} + for attr in attributes: + response = connection.describe_instance_attribute(Attribute=attr, InstanceId=instance_id) + for key in response: + if key not in ("InstanceId", "ResponseMetadata"): + result[key] = response[key] + return result + +def main(): + instance_attributes = [ + "instanceType", +
"kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport", + "enclaveOptions", + "disableApiStop", + ] argument_spec = dict( - minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']), - instance_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + minimum_uptime=dict(required=False, type="int", default=None, aliases=["uptime"]), + instance_ids=dict(default=[], type="list", elements="str"), + filters=dict(default={}, type="dict"), + include_attributes=dict(type="list", elements="str", aliases=["attributes"], choices=instance_attributes), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['instance_ids', 'filters'] - ], + mutually_exclusive=[["instance_ids", "filters"]], supports_check_mode=True, ) try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_ec2_instances(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py index 8358d9dba..ea4d7f7e4 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_key version_added: 1.0.0 @@ -45,31 +42,43 @@ options: EC2 Instance Connect, and EC2 Serial Console. - By default Amazon will create an RSA key. - Mutually exclusive with parameter I(key_material). - - Requires at least botocore version 1.21.23. type: str choices: - rsa - ed25519 version_added: 3.1.0 + file_name: + description: + - Name of the file where the generated private key will be saved. + - When provided, the I(key.private_key) attribute will be removed from the return value. + - The file is written out on the 'host' side rather than the 'controller' side. + - Ignored when I(state=absent) or I(key_material) is provided. + type: path + version_added: 6.4.0 notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. + - For security reasons, this module should be used with B(no_log=true) and (register) functionalities + when creating new key pair without providing I(key_material). extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 author: - "Vincent Viallet (@zbal)" - "Prasad Katti (@prasadkatti)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: create a new EC2 key pair, returns generated private key + # use no_log to avoid private key being displayed into output amazon.aws.ec2_key: name: my_keypair + no_log: true + register: aws_ec2_key_pair - name: create key pair using provided key_material amazon.aws.ec2_key: @@ -81,10 +90,11 @@ EXAMPLES = ''' name: my_keypair key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}" -- name: Create ED25519 key pair +- name: Create ED25519 key pair and save private key into a file amazon.aws.ec2_key: name: my_keypair key_type: ed25519 + file_name: /tmp/aws_ssh_rsa # try creating a key pair with the name of an already existing keypair # but don't overwrite it even if the key is different (force=false) @@ -94,13 +104,13 @@ EXAMPLES = ''' key_material: 'ssh-rsa AAAAxyz...== me@example.com' force: false -- name: remove key pair by name +- name: remove key pair from AWS by name amazon.aws.ec2_key: name: my_keypair state: absent -''' +""" -RETURN = ''' +RETURN = r""" changed: description: whether a keypair was created/deleted returned: always @@ -138,7 +148,7 @@ key: sample: '{"my_key": "my value"}' private_key: description: private key of a newly created keypair - returned: when a new keypair is created by AWS (key_material is not provided) + returned: when a new keypair is created by AWS (I(key_material) is not provided) and I(file_name) is not provided. type: str sample: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKC... @@ -149,8 +159,9 @@ key: type: str sample: rsa version_added: 3.1.0 -''' +""" +import os import uuid try: @@ -160,13 +171,13 @@ except ImportError: from ansible.module_utils._text import to_bytes -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters class Ec2KeyFailure(Exception): @@ -177,11 +188,7 @@ class Ec2KeyFailure(Exception): def _import_key_pair(ec2_client, name, key_material, tag_spec=None): - params = { - 'KeyName': name, - 'PublicKeyMaterial': to_bytes(key_material), - 'TagSpecifications': tag_spec - } + params = {"KeyName": name, "PublicKeyMaterial": to_bytes(key_material), "TagSpecifications": tag_spec} params = scrub_none_parameters(params) @@ -192,28 +199,31 @@ def _import_key_pair(ec2_client, name, key_material, tag_spec=None): return key -def extract_key_data(key, key_type=None): +def extract_key_data(key, key_type=None, file_name=None): data = { - 'name': key['KeyName'], - 'fingerprint': key['KeyFingerprint'], - 'id': key['KeyPairId'], - 'tags': boto3_tag_list_to_ansible_dict(key.get('Tags') or []), + "name": 
key["KeyName"], + "fingerprint": key["KeyFingerprint"], + "id": key["KeyPairId"], + "tags": boto3_tag_list_to_ansible_dict(key.get("Tags") or []), # KeyMaterial is returned by create_key_pair, but not by describe_key_pairs - 'private_key': key.get('KeyMaterial'), + "private_key": key.get("KeyMaterial"), # KeyType is only set by describe_key_pairs - 'type': key.get('KeyType') or key_type + "type": key.get("KeyType") or key_type, } + # Write the private key to disk and remove it from the return value + if file_name and data["private_key"] is not None: + data = _write_private_key(data, file_name) return scrub_none_parameters(data) def get_key_fingerprint(check_mode, ec2_client, key_material): - ''' + """ EC2's fingerprints are non-trivial to generate, so push this key to a temporary name and make ec2 calculate the fingerprint for us. http://blog.jbrowne.com/?p=23 https://forums.aws.amazon.com/thread.jspa?messageID=352828 - ''' + """ # find an unused name name_in_use = True while name_in_use: @@ -221,27 +231,30 @@ def get_key_fingerprint(check_mode, ec2_client, key_material): name_in_use = find_key_pair(ec2_client, random_name) temp_key = _import_key_pair(ec2_client, random_name, key_material) delete_key_pair(check_mode, ec2_client, random_name, finish_task=False) - return temp_key['KeyFingerprint'] + return temp_key["KeyFingerprint"] def find_key_pair(ec2_client, name): try: key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name]) - except is_boto3_error_code('InvalidKeyPair.NotFound'): + except is_boto3_error_code("InvalidKeyPair.NotFound"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as err: # pylint: disable=duplicate-except raise Ec2KeyFailure(err, "error finding keypair") except IndexError: key = None - return key['KeyPairs'][0] + return key["KeyPairs"][0] def _create_key_pair(ec2_client, name, tag_spec, key_type): params = { - 'KeyName': name, - 'TagSpecifications': tag_spec, - 'KeyType': key_type, + "KeyName": name, + "TagSpecifications": tag_spec, + "KeyType": key_type, } params = scrub_none_parameters(params) @@ -253,31 +266,47 @@ def _create_key_pair(ec2_client, name, tag_spec, key_type): return key -def create_new_key_pair(ec2_client, name, key_material, key_type, tags, check_mode): - ''' +def _write_private_key(key_data, file_name): + """ + Write the private key data to the specified file, and remove 'private_key' + from the ouput. This ensures we don't expose the key data in logs or task output. + """ + try: + file = os.open(file_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600) + os.write(file, key_data["private_key"].encode("utf-8")) + os.close(file) + except (IOError, OSError) as e: + raise Ec2KeyFailure(e, "Could not save private key to specified path. 
Private key is irretrievable.") + + del key_data["private_key"] + return key_data + + +def create_new_key_pair(ec2_client, name, key_material, key_type, tags, file_name, check_mode): + """ key does not exist, we create new key - ''' + """ if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair created'} + return {"changed": True, "key": None, "msg": "key pair created"} - tag_spec = boto3_tag_specifications(tags, ['key-pair']) + tag_spec = boto3_tag_specifications(tags, ["key-pair"]) if key_material: key = _import_key_pair(ec2_client, name, key_material, tag_spec) else: key = _create_key_pair(ec2_client, name, tag_spec, key_type) - key_data = extract_key_data(key, key_type) + key_data = extract_key_data(key, key_type, file_name) - result = {'changed': True, 'key': key_data, 'msg': 'key pair created'} + result = {"changed": True, "key": key_data, "msg": "key pair created"} return result def update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec): if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair updated'} + return {"changed": True, "key": None, "msg": "key pair updated"} new_fingerprint = get_key_fingerprint(check_mode, ec2_client, key_material) changed = False msg = "key pair already exists" - if key['KeyFingerprint'] != new_fingerprint: + if key["KeyFingerprint"] != new_fingerprint: delete_key_pair(check_mode, ec2_client, name, finish_task=False) key = _import_key_pair(ec2_client, name, key_material, tag_spec) msg = "key pair updated" @@ -286,14 +315,14 @@ def update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_mater return {"changed": changed, "key": key_data, "msg": msg} -def update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec): +def update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec, file_name): if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair updated'} + return {"changed": True, "key": None, "msg": "key pair updated"} else: delete_key_pair(check_mode, ec2_client, name, finish_task=False) key = _create_key_pair(ec2_client, name, tag_spec, key_type) - key_data = extract_key_data(key, key_type) - return {'changed': True, 'key': key_data, 'msg': "key pair updated"} + key_data = extract_key_data(key, key_type, file_name) + return {"changed": True, "key": key_data, "msg": "key pair updated"} def _delete_key_pair(ec2_client, key_name): @@ -307,82 +336,83 @@ def delete_key_pair(check_mode, ec2_client, name, finish_task=True): key = find_key_pair(ec2_client, name) if key and check_mode: - result = {'changed': True, 'key': None, 'msg': 'key deleted'} + result = {"changed": True, "key": None, "msg": "key deleted"} elif not key: - result = {'key': None, 'msg': 'key did not exist'} + result = {"key": None, "msg": "key did not exist"} + return result else: _delete_key_pair(ec2_client, name) if not finish_task: return - result = {'changed': True, 'key': None, 'msg': 'key deleted'} + result = {"changed": True, "key": None, "msg": "key deleted"} return result def handle_existing_key_pair_update(module, ec2_client, name, key): - key_material = module.params.get('key_material') - force = module.params.get('force') - key_type = module.params.get('key_type') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - tag_spec = boto3_tag_specifications(tags, ['key-pair']) + key_material = module.params.get("key_material") + force = module.params.get("force") + key_type = module.params.get("key_type") + tags = 
module.params.get("tags") + purge_tags = module.params.get("purge_tags") + tag_spec = boto3_tag_specifications(tags, ["key-pair"]) check_mode = module.check_mode + file_name = module.params.get("file_name") if key_material and force: result = update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec) - elif key_type and key_type != key['KeyType']: - result = update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec) + elif key_type and key_type != key["KeyType"]: + result = update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec, file_name) else: changed = False - changed |= ensure_ec2_tags(ec2_client, module, key['KeyPairId'], tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags(ec2_client, module, key["KeyPairId"], tags=tags, purge_tags=purge_tags) key = find_key_pair(ec2_client, name) - key_data = extract_key_data(key) + key_data = extract_key_data(key, file_name=file_name) result = {"changed": changed, "key": key_data, "msg": "key pair already exists"} return result def main(): - argument_spec = dict( name=dict(required=True), key_material=dict(no_log=False), - force=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - key_type=dict(type='str', choices=['rsa', 'ed25519']), + force=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + key_type=dict(type="str", choices=["rsa", "ed25519"]), + file_name=dict(type="path", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['key_material', 'key_type'] - ], - supports_check_mode=True + mutually_exclusive=[["key_material", "key_type"]], + supports_check_mode=True, ) - ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2_client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - name = module.params['name'] - state = module.params.get('state') - key_material = module.params.get('key_material') - key_type = module.params.get('key_type') - tags = module.params.get('tags') + name = module.params["name"] + state = module.params.get("state") + key_material = module.params.get("key_material") + key_type = module.params.get("key_type") + tags = module.params.get("tags") + file_name = module.params.get("file_name") result = {} - if key_type: - module.require_botocore_at_least('1.21.23', reason='to set the key_type for a keypair') try: - if state == 'absent': + if state == "absent": result = delete_key_pair(module.check_mode, ec2_client, name) - elif state == 'present': + elif state == "present": # check if key already exists key = find_key_pair(ec2_client, name) if key: result = handle_existing_key_pair_update(module, ec2_client, name, key) else: - result = create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + result = create_new_key_pair( + ec2_client, name, key_material, key_type, tags, file_name, module.check_mode + ) except Ec2KeyFailure as e: if e.original_e: @@ -393,5 +423,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py new file mode 100644 index 
000000000..f8701a11b --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_key_info +version_added: 6.4.0 +short_description: Gather information about EC2 key pairs in AWS +description: + - Gather information about EC2 key pairs in AWS. +author: + - Aubin Bikouo (@abikouo) +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeKeyPairs.html) for possible filters. Filter + names and values are case sensitive. + required: false + default: {} + type: dict + names: + description: + - The key pair names. + required: false + type: list + elements: str + default: [] + ids: + description: + - The IDs of the key pairs. + required: false + type: list + elements: str + default: [] + include_public_key: + description: + - Whether or not to include the public key material in the response. + type: bool + default: false +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all key pairs + amazon.aws.ec2_key_info: + +- name: Gather information about a specific key pair + amazon.aws.ec2_key_info: + names: + - my-sample-key + +- name: Retrieve EC2 key pair by fingerprint + amazon.aws.ec2_key_info: + filters: + fingerprint: "1bSd8jVye3In5oF4zZI4o8BcXfdbYN+daCt9O1fh3Qk=" +""" + +RETURN = r""" +keypairs: + description: A list of ec2 key pairs. + returned: always + type: complex + contains: + key_pair_id: + description: The ID of the key pair. + returned: always + type: str + sample: key-01238eb03f07d7268 + key_fingerprint: + description: Fingerprint of the key. + returned: always + type: str + sample: '05:97:1a:2a:df:f6:06:a9:98:4b:ca:05:71:a1:81:e8:ff:6d:d2:a3' + key_name: + description: The name of the key pair. + returned: always + type: str + sample: my-sample-keypair + key_type: + description: The type of key pair. + returned: always + type: str + sample: rsa + public_key: + description: The public key material. + returned: always + type: str + create_time: + description: The time the key pair was created. + returned: always + type: str + sample: "2023-08-16T10:13:33.025000+00:00" + tags: + description: A dictionary representing the tags attached to the key pair. 
+ returned: always + type: dict + sample: '{"my_key": "my value"}' +""" + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + + +def list_ec2_key_pairs(connection, module): + ids = module.params.get("ids") + names = module.params.get("names") + include_public_key = module.params.get("include_public_key") + filters = module.params.get("filters") + if filters: + filters = ansible_dict_to_boto3_filter_list(filters) + + params = {} + if filters: + params["Filters"] = filters + if ids: + params["KeyPairIds"] = ids + if names: + params["KeyNames"] = names + if include_public_key: + params["IncludePublicKey"] = True + + try: + result = connection.describe_key_pairs(**params) + except is_boto3_error_code("InvalidKeyPair.NotFound"): + result = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list EC2 key pairs") + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_keys = [camel_dict_to_snake_dict(key) for key in result.get("KeyPairs", [])] + + # Turn the boto3 result in to ansible friendly tag dictionary + for key in snaked_keys: + key["tags"] = boto3_tag_list_to_ansible_dict(key.get("tags", []), "key", "value") + + module.exit_json(keypairs=snaked_keys) + + +def main(): + argument_spec = dict( + filters=dict(type="dict", default={}), + names=dict(type="list", elements="str", default=[]), + ids=dict(type="list", elements="str", default=[]), + include_public_key=dict(type="bool", default=False), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + list_ec2_key_pairs(connection, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py index f7e9d509f..26ecaad0a 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_metadata_facts version_added: 1.0.0 @@ -26,18 +23,18 @@ description: is set to disabled for the EC2 instance, the module will return an error while retrieving a session token. notes: - Parameters to filter on ec2_metadata_facts may be added later. 
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Gather EC2 metadata facts - amazon.aws.ec2_metadata_facts: - debug: msg: "This instance is a t1.micro" when: ansible_ec2_instance_type == "t1.micro" -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: description: Dictionary of new facts representing discovered properties of the EC2 instance. returned: changed @@ -435,17 +432,18 @@ ansible_facts: description: The instance user data. type: str sample: "#!/bin/bash" -''' +""" import json import re import socket import time +import zlib -from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import quote +from ansible.module_utils.urls import fetch_url socket.setdefaulttimeout(5) @@ -458,13 +456,13 @@ except AttributeError: json_decode_error = ValueError -class Ec2Metadata(object): - ec2_metadata_token_uri = 'http://169.254.169.254/latest/api/token' - ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' - ec2_metadata_instance_tags_uri = 'http://169.254.169.254/latest/meta-data/tags/instance' - ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' - ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' - ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/' +class Ec2Metadata: + ec2_metadata_token_uri = "http://169.254.169.254/latest/api/token" + ec2_metadata_uri = "http://169.254.169.254/latest/meta-data/" + ec2_metadata_instance_tags_uri = "http://169.254.169.254/latest/meta-data/tags/instance" + ec2_sshdata_uri = "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key" + ec2_userdata_uri = "http://169.254.169.254/latest/user-data/" + ec2_dynamicdata_uri = "http://169.254.169.254/latest/dynamic/" def __init__( self, @@ -485,39 +483,78 @@ class Ec2Metadata(object): self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri self._data = {} self._token = None - self._prefix = 'ansible_ec2_%s' + self._prefix = "ansible_ec2_%s" + + def _decode(self, data): + try: + return data.decode("utf-8") + except UnicodeDecodeError: + # Decoding as UTF-8 failed, return data without raising an error + self.module.warn("Decoding user-data as UTF-8 failed, return data as is ignoring any error") + return data.decode("utf-8", errors="ignore") + + def decode_user_data(self, data): + is_compressed = False + + # Check if data is compressed using zlib header + if data.startswith(b"\x78\x9c") or data.startswith(b"\x1f\x8b"): + is_compressed = True + + if is_compressed: + # Data is compressed, attempt decompression and decode using UTF-8 + try: + decompressed = zlib.decompress(data, zlib.MAX_WBITS | 32) + return self._decode(decompressed) + except zlib.error: + # Unable to decompress, return original data + self.module.warn( + "Unable to decompress user-data using zlib, attempt to decode original user-data as UTF-8" + ) + return self._decode(data) + else: + # Data is not compressed, decode using UTF-8 + return self._decode(data) def _fetch(self, url): - encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]') + encoded_url = quote(url, safe="%/:=&?~#+!$,;'@()*[]") headers = {} if self._token: - headers = {'X-aws-ec2-metadata-token': self._token} + headers = {"X-aws-ec2-metadata-token": self._token} + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) - if info.get('status') in (401, 403): - self.module.fail_json(msg='Failed 
to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) - elif info.get('status') not in (200, 404): + if info.get("status") in (401, 403): + self.module.fail_json(msg="Failed to retrieve metadata from AWS: {0}".format(info["msg"]), response=info) + elif info.get("status") not in (200, 404): time.sleep(3) # request went bad, retry once then raise - self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg'])) + self.module.warn("Retrying query to metadata service. First attempt failed: {0}".format(info["msg"])) response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) - if info.get('status') not in (200, 404): + if info.get("status") not in (200, 404): # fail out now - self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) - if response and info['status'] < 400: + self.module.fail_json( + msg="Failed to retrieve metadata from AWS: {0}".format(info["msg"]), response=info + ) + if response and info["status"] < 400: data = response.read() + if "user-data" in encoded_url: + return to_text(self.decode_user_data(data)) else: data = None return to_text(data) def _mangle_fields(self, fields, uri, filter_patterns=None): - filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns + filter_patterns = ["public-keys-0"] if filter_patterns is None else filter_patterns new_fields = {} for key, value in fields.items(): - split_fields = key[len(uri):].split('/') + split_fields = key[len(uri):].split("/") # fmt: skip # Parse out the IAM role name (which is _not_ the same as the instance profile name) - if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]: + if ( + len(split_fields) == 3 + and split_fields[0:2] == ["iam", "security-credentials"] + and ":" not in split_fields[2] + ): new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2] if len(split_fields) > 1 and split_fields[1]: new_key = "-".join(split_fields) @@ -536,34 +573,34 @@ class Ec2Metadata(object): raw_subfields = self._fetch(uri) if not raw_subfields: return - subfields = raw_subfields.split('\n') + subfields = raw_subfields.split("\n") for field in subfields: - if field.endswith('/') and recurse: + if field.endswith("/") and recurse: self.fetch(uri + field) - if uri.endswith('/'): + if uri.endswith("/"): new_uri = uri + field else: - new_uri = uri + '/' + field - if new_uri not in self._data and not new_uri.endswith('/'): + new_uri = uri + "/" + field + if new_uri not in self._data and not new_uri.endswith("/"): content = self._fetch(new_uri) - if field == 'security-groups' or field == 'security-group-ids': - sg_fields = ",".join(content.split('\n')) - self._data['%s' % (new_uri)] = sg_fields + if field == "security-groups" or field == "security-group-ids": + sg_fields = ",".join(content.split("\n")) + self._data["%s" % (new_uri)] = sg_fields else: try: json_dict = json.loads(content) - self._data['%s' % (new_uri)] = content - for (key, value) in json_dict.items(): - self._data['%s:%s' % (new_uri, key.lower())] = value + self._data["%s" % (new_uri)] = content + for key, value in json_dict.items(): + self._data["%s:%s" % (new_uri, key.lower())] = value except (json_decode_error, AttributeError): - self._data['%s' % (new_uri)] = content # not a stringified JSON string + self._data["%s" % (new_uri)] = content # not a stringified JSON string def fix_invalid_varnames(self, data): """Change ':'' and '-' to '_' to 
ensure valid template variable names""" new_data = data.copy() for key, value in data.items(): - if ':' in key or '-' in key: - newkey = re.sub(':|-', '_', key) + if ":" in key or "-" in key: + newkey = re.sub(":|-", "_", key) new_data[newkey] = value del new_data[key] @@ -571,19 +608,23 @@ class Ec2Metadata(object): def fetch_session_token(self, uri_token): """Used to get a session token for IMDSv2""" - headers = {'X-aws-ec2-metadata-token-ttl-seconds': '60'} - response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) - - if info.get('status') == 403: - self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) - elif info.get('status') not in (200, 404): + headers = {"X-aws-ec2-metadata-token-ttl-seconds": "60"} + response, info = fetch_url(self.module, uri_token, method="PUT", headers=headers, force=True) + + if info.get("status") == 403: + self.module.fail_json( + msg="Failed to retrieve metadata token from AWS: {0}".format(info["msg"]), response=info + ) + elif info.get("status") not in (200, 404): time.sleep(3) # request went bad, retry once then raise - self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg'])) - response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) - if info.get('status') not in (200, 404): + self.module.warn("Retrying query to metadata service. First attempt failed: {0}".format(info["msg"])) + response, info = fetch_url(self.module, uri_token, method="PUT", headers=headers, force=True) + if info.get("status") not in (200, 404): # fail out now - self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) + self.module.fail_json( + msg="Failed to retrieve metadata token from AWS: {0}".format(info["msg"]), response=info + ) if response: token_data = response.read() else: @@ -594,8 +635,8 @@ class Ec2Metadata(object): self._token = self.fetch_session_token(self.uri_token) # create session token for IMDS self.fetch(self.uri_meta) # populate _data with metadata data = self._mangle_fields(self._data, self.uri_meta) - data[self._prefix % 'user-data'] = self._fetch(self.uri_user) - data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) + data[self._prefix % "user-data"] = self._fetch(self.uri_user) + data[self._prefix % "public-key"] = self._fetch(self.uri_ssh) self._data = {} # clear out metadata in _data self.fetch(self.uri_dynamic) # populate _data with dynamic data @@ -604,12 +645,12 @@ class Ec2Metadata(object): data = self.fix_invalid_varnames(data) instance_tags_keys = self._fetch(self.uri_instance_tags) - instance_tags_keys = instance_tags_keys.split('\n') if instance_tags_keys != "None" else [] - data[self._prefix % 'instance_tags_keys'] = instance_tags_keys + instance_tags_keys = instance_tags_keys.split("\n") if instance_tags_keys != "None" else [] + data[self._prefix % "instance_tags_keys"] = instance_tags_keys # Maintain old key for backwards compatibility - if 'ansible_ec2_instance_identity_document_region' in data: - data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region'] + if "ansible_ec2_instance_identity_document_region" in data: + data["ansible_ec2_placement_region"] = data["ansible_ec2_instance_identity_document_region"] return data @@ -625,5 +666,5 @@ def main(): module.exit_json(**ec2_metadata_facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py index d4fa9b564..9d16f339f 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_security_group version_added: 1.0.0 @@ -33,7 +30,8 @@ options: type: str description: description: - - Description of the security group. Required when C(state) is C(present). + - Description of the security group. + - Required when I(state) is C(present). required: false type: str vpc_id: @@ -51,38 +49,42 @@ options: elements: dict suboptions: cidr_ip: - type: str - description: + type: list + elements: raw + description: - The IPv4 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will be removed in a release after 2024-12-01. cidr_ipv6: - type: str - description: + type: list + elements: raw + description: - The IPv6 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will be removed in a release after 2024-12-01. ip_prefix: - type: str - description: + type: list + elements: str + description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_id: - type: str - description: + type: list + elements: str + description: - The ID of the Security Group that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: - type: list - elements: str - description: + type: list + elements: str + description: - Name of the Security Group that traffic is coming from. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. @@ -90,47 +92,58 @@ options: - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: - type: str - description: + type: str + description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: - type: str - description: + type: str + description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or - - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) - - When using C(icmp) or C(icmpv6) as the protocol, you can pass - - the C(icmp_type) and C(icmp_code) parameters instead of - - C(from_port) and C(to_port). + number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)). 
+ default: 'tcp' from_port: - type: int - description: - - The start of the range of ports that traffic is coming from. + type: int + description: + - The start of the range of ports that traffic is going to. - A value can be between C(0) to C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). to_port: - type: int - description: - - The end of the range of ports that traffic is coming from. + type: int + description: + - The end of the range of ports that traffic is going to. - A value can be between C(0) to C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). + ports: + type: list + elements: str + description: + - A list of ports that traffic is going to. + - Elements of the list can be a single port (for example C(8080)), or a range of ports + specified as C(<START>-<END>), (for example C(1011-1023)). + - Mutually exclusive with I(icmp_code), I(icmp_type), I(from_port) and I(to_port). icmp_type: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to - - specify the ICMP type to use. The option is mutually exclusive with C(from_port). - - A value of C(-1) indicates all ICMP types. + - The ICMP type of the packet. + - A value of C(-1) indicates all ICMP types. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). icmp_code: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP code to use. The option is mutually exclusive with C(to_port). - - A value of C(-1) indicates all ICMP codes. + - The ICMP code of the packet. + - A value of C(-1) indicates all ICMP codes. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). rule_desc: - type: str - description: A description for the rule. + type: str + description: A description for the rule. + rules_egress: description: - List of firewall outbound rules to enforce in this group (see example). If none are supplied, @@ -141,80 +154,96 @@ options: aliases: ['egress_rules'] suboptions: cidr_ip: - type: str - description: + type: list + elements: raw + description: - The IPv4 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will be removed in a release after 2024-12-01. cidr_ipv6: - type: str - description: + type: list + elements: raw + description: - The IPv6 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will be removed in a release after 2024-12-01. ip_prefix: - type: str - description: + type: list + elements: str + description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). 
group_id: - type: str - description: + type: list + elements: str + description: - The ID of the Security Group that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: - type: str - description: + type: list + elements: str + description: - Name of the Security Group that traffic is going to. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: - type: str - description: + type: str + description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: - type: str - description: + type: str + description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or - - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) - - When using C(icmp) or C(icmpv6) as the protocol, you can pass the - - C(icmp_type) and C(icmp_code) parameters instead of C(from_port) and C(to_port). + number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)). + default: 'tcp' from_port: - type: int - description: + type: int + description: - The start of the range of ports that traffic is going to. - A value can be between C(0) to C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). to_port: - type: int - description: + type: int + description: - The end of the range of ports that traffic is going to. - A value can be between C(0) to C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). + ports: + type: list + elements: str + description: + - A list of ports that traffic is going to. + - Elements of the list can be a single port (for example C(8080)), or a range of ports + specified as C(<START>-<END>), (for example C(1011-1023)). + - Mutually exclusive with I(icmp_code), I(icmp_type), I(from_port) and I(to_port). icmp_type: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP type to use. The option is mutually exclusive with C(from_port). - - A value of C(-1) indicates all ICMP types. + - The ICMP type of the packet. + - A value of C(-1) indicates all ICMP types. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). icmp_code: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP code to use. The option is mutually exclusive with C(to_port). - - A value of C(-1) indicates all ICMP codes. + - The ICMP code of the packet. + - A value of C(-1) indicates all ICMP codes. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). rule_desc: type: str description: A description for the rule. 
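For C(icmp) and C(icmpv6) rules the EC2 API reuses the port pair: C(FromPort) carries the ICMP type and C(ToPort) the ICMP code, with C(-1) matching all types or codes. A minimal sketch of that mapping, using a hypothetical helper rather than the collection's own code:

def icmp_permission(proto, icmp_type=-1, icmp_code=-1):
    # For proto=icmp/icmpv6 the EC2 API reuses FromPort for the ICMP type
    # and ToPort for the ICMP code; -1 matches all types/codes.
    if proto not in ("icmp", "icmpv6"):
        raise ValueError("icmp_type/icmp_code require proto icmp or icmpv6")
    return {"IpProtocol": proto, "FromPort": icmp_type, "ToPort": icmp_code}

# icmp_permission("icmp", icmp_type=3, icmp_code=1)
# -> {'IpProtocol': 'icmp', 'FromPort': 3, 'ToPort': 1}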
@@ -242,8 +271,8 @@ options: type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -253,20 +282,20 @@ notes: The module will refuse to create a depended-on group without a description. - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group_info). The usage did not change. -''' +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. -EXAMPLES = ''' - name: example using security group rule descriptions amazon.aws.ec2_security_group: name: "{{ name }}" description: sg with rule descriptions vpc_id: vpc-xxxxxxxx - profile: "{{ aws_profile }}" - region: us-east-1 rules: - proto: tcp ports: - - 80 + - 80 cidr_ip: 0.0.0.0/0 rule_desc: allow all on port 80 @@ -275,8 +304,6 @@ EXAMPLES = ''' name: "{{ name }}" description: sg for ICMP vpc_id: vpc-xxxxxxxx - profile: "{{ aws_profile }}" - region: us-east-1 rules: - proto: icmp icmp_type: 3 @@ -288,9 +315,6 @@ EXAMPLES = ''' name: example description: an example EC2 group vpc_id: 12345 - region: eu-west-1 - aws_secret_key: SECRET - aws_access_key: ACCESS rules: - proto: tcp from_port: 80 @@ -320,7 +344,7 @@ EXAMPLES = ''' group_id: sg-12345678 - proto: icmp from_port: 8 # icmp type, -1 = any type - to_port: -1 # icmp subtype, -1 = any subtype + to_port: -1 # icmp subtype, -1 = any subtype cidr_ip: 10.0.0.0/8 - proto: all # the containing group name may be specified here @@ -348,7 +372,6 @@ EXAMPLES = ''' name: example2 description: an example2 EC2 group vpc_id: 12345 - region: eu-west-1 rules: # 'ports' rule keyword was introduced in version 2.4. It accepts a single # port value or a list of values including ranges (from_port-to_port). 
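Each I(ports) entry expands to a C(from_port)/C(to_port) pair: a single value maps to an equal pair, and C(<START>-<END>) maps to the range bounds. A minimal sketch of that documented expansion, as a hypothetical helper rather than the module's actual parser:

def expand_ports(ports):
    # "8080" -> (8080, 8080); "1011-1023" -> (1011, 1023)
    for entry in ports:
        start, sep, end = str(entry).partition("-")
        yield (int(start), int(end)) if sep else (int(start), int(start))

# list(expand_ports(["8080", "1011-1023"])) -> [(8080, 8080), (1011, 1023)]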
@@ -381,16 +404,15 @@ EXAMPLES = ''' - 64:ff9b::/96 group_id: - sg-edcd9784 - diff: True + diff: true - name: "Delete group by its id" amazon.aws.ec2_security_group: - region: eu-west-1 group_id: sg-33b4ee5b state: absent -''' +""" -RETURN = ''' +RETURN = r""" group_name: description: Security group name sample: My Security Group @@ -447,19 +469,20 @@ owner_id: sample: 123456789012 type: int returned: on create/update -''' +""" import itertools import json import re from collections import namedtuple from copy import deepcopy -from ipaddress import IPv6Network from ipaddress import ip_network from time import sleep try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -469,26 +492,43 @@ from ansible.module_utils.common.network import to_ipv6_subnet from ansible.module_utils.common.network import to_subnet from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) -valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +Rule = namedtuple("Rule", ["port_range", "protocol", "target", "target_type", "description"]) +TARGET_TYPES_ALL = {"ipv4", "ipv6", "group", "ip_prefix"} +SOURCE_TYPES_ALL = {"cidr_ip", "cidr_ipv6", "group_id", "group_name", "ip_prefix"} +PORT_TYPES_ALL = {"from_port", "to_port", "ports", "icmp_type", "icmp_code"} current_account_id = None +class SecurityGroupError(Exception): + def __init__(self, msg, e=None, **kwargs): + super().__init__(msg) + self.message = msg + self.exception = e + self.kwargs = kwargs + + # Simple helper to perform the module.fail_... 
call once we have module available to us + def fail(self, module): + if self.exception: + module.fail_json_aws(self.exception, msg=self.message, **self.kwargs) + module.fail_json(msg=self.message, **self.kwargs) + + def rule_cmp(a, b): """Compare rules without descriptions""" - for prop in ['port_range', 'protocol', 'target', 'target_type']: - if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): + for prop in ["port_range", "protocol", "target", "target_type"]: + if prop == "port_range" and to_text(a.protocol) == to_text(b.protocol): # equal protocols can interchange `(-1, -1)` and `(None, None)` if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): continue @@ -506,46 +546,50 @@ def rules_to_permissions(rules): def to_permission(rule): # take a Rule, output the serialized grant perm = { - 'IpProtocol': rule.protocol, + "IpProtocol": rule.protocol, } - perm['FromPort'], perm['ToPort'] = rule.port_range - if rule.target_type == 'ipv4': - perm['IpRanges'] = [{ - 'CidrIp': rule.target, - }] + perm["FromPort"], perm["ToPort"] = rule.port_range + if rule.target_type == "ipv4": + perm["IpRanges"] = [ + { + "CidrIp": rule.target, + } + ] if rule.description: - perm['IpRanges'][0]['Description'] = rule.description - elif rule.target_type == 'ipv6': - perm['Ipv6Ranges'] = [{ - 'CidrIpv6': rule.target, - }] + perm["IpRanges"][0]["Description"] = rule.description + elif rule.target_type == "ipv6": + perm["Ipv6Ranges"] = [ + { + "CidrIpv6": rule.target, + } + ] if rule.description: - perm['Ipv6Ranges'][0]['Description'] = rule.description - elif rule.target_type == 'group': + perm["Ipv6Ranges"][0]["Description"] = rule.description + elif rule.target_type == "group": if isinstance(rule.target, tuple): pair = {} if rule.target[0]: - pair['UserId'] = rule.target[0] + pair["UserId"] = rule.target[0] # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific if rule.target[1]: - pair['GroupId'] = rule.target[1] + pair["GroupId"] = rule.target[1] elif rule.target[2]: - pair['GroupName'] = rule.target[2] - perm['UserIdGroupPairs'] = [pair] + pair["GroupName"] = rule.target[2] + perm["UserIdGroupPairs"] = [pair] else: - perm['UserIdGroupPairs'] = [{ - 'GroupId': rule.target - }] + perm["UserIdGroupPairs"] = [{"GroupId": rule.target}] if rule.description: - perm['UserIdGroupPairs'][0]['Description'] = rule.description - elif rule.target_type == 'ip_prefix': - perm['PrefixListIds'] = [{ - 'PrefixListId': rule.target, - }] + perm["UserIdGroupPairs"][0]["Description"] = rule.description + elif rule.target_type == "ip_prefix": + perm["PrefixListIds"] = [ + { + "PrefixListId": rule.target, + } + ] if rule.description: - perm['PrefixListIds'][0]['Description'] = rule.description - elif rule.target_type not in valid_targets: - raise ValueError('Invalid target type for rule {0}'.format(rule)) + perm["PrefixListIds"][0]["Description"] = rule.description + elif rule.target_type not in TARGET_TYPES_ALL: + raise ValueError(f"Invalid target type for rule {rule}") return fix_port_and_protocol(perm) @@ -560,16 +604,17 @@ def rule_from_group_permission(perm): GroupId is preferred as it is more specific except when targeting 'amazon-' prefixed security groups (such as EC2 Classic ELBs). 
""" + def ports_from_permission(p): - if 'FromPort' not in p and 'ToPort' not in p: + if "FromPort" not in p and "ToPort" not in p: return (None, None) - return (int(perm['FromPort']), int(perm['ToPort'])) + return (int(perm["FromPort"]), int(perm["ToPort"])) # outputs a rule tuple for target_key, target_subkey, target_type in [ - ('IpRanges', 'CidrIp', 'ipv4'), - ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), - ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ("IpRanges", "CidrIp", "ipv4"), + ("Ipv6Ranges", "CidrIpv6", "ipv6"), + ("PrefixListIds", "PrefixListId", "ip_prefix"), ]: if target_key not in perm: continue @@ -577,49 +622,45 @@ def rule_from_group_permission(perm): # there may be several IP ranges here, which is ok yield Rule( ports_from_permission(perm), - to_text(perm['IpProtocol']), + to_text(perm["IpProtocol"]), r[target_subkey], target_type, - r.get('Description') + r.get("Description"), ) - if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: - for pair in perm['UserIdGroupPairs']: + if "UserIdGroupPairs" in perm and perm["UserIdGroupPairs"]: + for pair in perm["UserIdGroupPairs"]: target = ( - pair.get('UserId', current_account_id), - pair.get('GroupId', None), + pair.get("UserId", current_account_id), + pair.get("GroupId", None), None, ) - if pair.get('UserId', '').startswith('amazon-'): + if pair.get("UserId", "").startswith("amazon-"): # amazon-elb and amazon-prefix rules don't need # group-id specified, so remove it when querying # from permission target = ( - pair.get('UserId', None), + pair.get("UserId", None), None, - pair.get('GroupName', None), + pair.get("GroupName", None), ) - elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id: + elif "VpcPeeringConnectionId" not in pair and pair["UserId"] != current_account_id: # EC2-Classic cross-account pass - elif 'VpcPeeringConnectionId' in pair: + elif "VpcPeeringConnectionId" in pair: # EC2-VPC cross-account VPC peering target = ( - pair.get('UserId', None), - pair.get('GroupId', None), + pair.get("UserId", None), + pair.get("GroupId", None), None, ) yield Rule( - ports_from_permission(perm), - to_text(perm['IpProtocol']), - target, - 'group', - pair.get('Description') + ports_from_permission(perm), to_text(perm["IpProtocol"]), target, "group", pair.get("Description") ) # Wrap just this method so we can retry on missing groups -@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound']) +@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=["InvalidGroup.NotFound"]) def get_security_groups_with_backoff(client, **kwargs): return client.describe_security_groups(**kwargs) @@ -627,8 +668,8 @@ def get_security_groups_with_backoff(client, **kwargs): def sg_exists_with_backoff(client, **kwargs): try: return client.describe_security_groups(aws_retry=True, **kwargs) - except is_boto3_error_code('InvalidGroup.NotFound'): - return {'SecurityGroups': []} + except is_boto3_error_code("InvalidGroup.NotFound"): + return {"SecurityGroups": []} def deduplicate_rules_args(rules): @@ -638,49 +679,129 @@ def deduplicate_rules_args(rules): return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) -def validate_rule(module, rule): - VALID_PARAMS = ( - 'cidr_ip', - 'cidr_ipv6', - 'ip_prefix', - 'group_id', - 'group_name', - 'group_desc', - 'proto', - 'from_port', - 'to_port', - 'icmp_type', - 'icmp_code', - 'icmp_keys', - 'rule_desc', - ) - if not isinstance(rule, dict): - module.fail_json(msg='Invalid rule parameter type [%s].' 
% type(rule)) - for k in rule: - if k not in VALID_PARAMS: - module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule)) - - if 'group_id' in rule and 'cidr_ip' in rule: - module.fail_json(msg='Specify group_id OR cidr_ip, not both') - elif 'group_name' in rule and 'cidr_ip' in rule: - module.fail_json(msg='Specify group_name OR cidr_ip, not both') - elif 'group_id' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") - elif 'group_name' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") - elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") - elif 'group_id' in rule and 'group_name' in rule: - module.fail_json(msg='Specify group_id OR group_name, not both') - elif ('icmp_type' in rule or 'icmp_code' in rule) and 'ports' in rule: - module.fail_json(msg='Specify icmp_code/icmp_type OR ports, not both') - elif ('from_port' in rule or 'to_port' in rule) and ('icmp_type' in rule or 'icmp_code' in rule) and 'icmp_keys' not in rule: - module.fail_json(msg='Specify from_port/to_port OR icmp_type/icmp_code, not both') - elif ('icmp_type' in rule or 'icmp_code' in rule) and ('icmp' not in rule['proto']): - module.fail_json(msg='Specify proto: icmp or icmpv6 when using icmp_type/icmp_code') - - -def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): +def validate_rule(rule): + icmp_type = rule.get("icmp_type", None) + icmp_code = rule.get("icmp_code", None) + proto = rule["proto"] + if (icmp_type is not None or icmp_code is not None) and ("icmp" not in proto): + raise SecurityGroupError(msg="Specify proto: icmp or icmpv6 when using icmp_type/icmp_code") + + +def _target_from_rule_with_group_id(rule, groups): + owner_id = current_account_id + FOREIGN_SECURITY_GROUP_REGEX = r"^([^/]+)/?(sg-\S+)?/(\S+)" + foreign_rule = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule["group_id"]) + + if not foreign_rule: + return "group", (owner_id, rule["group_id"], None), False + + # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it + # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg, + # and peer-VPC groups like 0987654321/sg-1234567890/example + owner_id, group_id, group_name = foreign_rule.groups() + group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) + groups[group_id] = group_instance + groups[group_name] = group_instance + if group_id and group_name: + if group_name.startswith("amazon-"): + # amazon-elb and amazon-prefix rules don't need group_id specified, + group_id = None + else: + # For cross-VPC references we'll use group_id as it is more specific + group_name = None + return "group", (owner_id, group_id, group_name), False + + +def _lookup_target_or_fail(client, group_name, vpc_id, groups, msg): + owner_id = current_account_id + filters = {"group-name": group_name} + if vpc_id: + filters["vpc-id"] = vpc_id + + filters = ansible_dict_to_boto3_filter_list(filters) + try: + found_group = get_security_groups_with_backoff(client, Filters=filters).get("SecurityGroups", [])[0] + except (is_boto3_error_code("InvalidGroup.NotFound"), IndexError): + raise SecurityGroupError(msg=msg) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + raise SecurityGroupError(msg="Failed to get security group", e=e) + + group_id = found_group["GroupId"] + groups[group_id] = found_group + groups[group_name] = found_group + return "group", (owner_id, group_id, None), False + + +def _create_target_from_rule(client, rule, groups, vpc_id, tags, check_mode): + owner_id = current_account_id + # We can't create a group in check mode... + if check_mode: + return "group", (owner_id, None, None), True + + group_name = rule["group_name"] + + try: + created_group = _create_security_group_with_wait(client, group_name, rule["group_desc"], vpc_id, tags) + except is_boto3_error_code("InvalidGroup.Duplicate"): + # The group exists, but didn't show up in any of our previous describe-security-groups calls + # Try searching on a filter for the name, and allow a retry window for AWS to update + # the model on their end. + fail_msg = ( + f"Could not create or use existing group '{group_name}' in rule {rule}. 
" + "Make sure the group exists and try using the group_id " + "instead of the name" + ) + return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) + except (BotoCoreError, ClientError) as e: + raise SecurityGroupError(msg="Failed to create security group '{0}' in rule {1}", e=e) + + group_id = created_group["GroupId"] + groups[group_id] = created_group + groups[group_name] = created_group + + return "group", (owner_id, group_id, None), True + + +def _target_from_rule_with_group_name(client, rule, name, group, groups, vpc_id, tags, check_mode): + group_name = rule["group_name"] + owner_id = current_account_id + if group_name == name: + # Simplest case, the rule references itself + group_id = group["GroupId"] + groups[group_id] = group + groups[group_name] = group + return "group", (owner_id, group_id, None), False + + # Already cached groups + if group_name in groups and group.get("VpcId") and groups[group_name].get("VpcId"): + # both are VPC groups, this is ok + group_id = groups[group_name]["GroupId"] + return "group", (owner_id, group_id, None), False + + if group_name in groups and not (group.get("VpcId") or groups[group_name].get("VpcId")): + # both are EC2 classic, this is ok + group_id = groups[group_name]["GroupId"] + return "group", (owner_id, group_id, None), False + + # if we got here, either the target group does not exist, or there + # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + + # Without a group description we can't create a new group, try looking up the group, or fail + # with a descriptive error message + if not rule.get("group_desc", "").strip(): + # retry describing the group + fail_msg = ( + f"group '{group_name}' not found and would be automatically created by rule {rule} but " + "no description was provided" + ) + return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) + + return _create_target_from_rule(client, rule, groups, vpc_id, tags, check_mode) + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id, tags): """ Returns tuple of (target_type, target, group_created) after validating rule params. @@ -697,191 +818,99 @@ def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): values that will be compared to current_rules (from current_ingress and current_egress) in wait_for_rule_propagation(). """ - FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)' - owner_id = current_account_id - group_id = None - group_name = None - target_group_created = False - - validate_rule(module, rule) - if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): - # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it - # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg, - # and peer-VPC groups like 0987654321/sg-1234567890/example - owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() - group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) - groups[group_id] = group_instance - groups[group_name] = group_instance - if group_id and group_name: - if group_name.startswith('amazon-'): - # amazon-elb and amazon-prefix rules don't need group_id specified, - group_id = None - else: - # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific - group_name = None - return 'group', (owner_id, group_id, group_name), False - elif 'group_id' in rule: - return 'group', (owner_id, rule['group_id'], None), False - elif 'group_name' in rule: - group_name = rule['group_name'] - if group_name == name: - group_id = group['GroupId'] - groups[group_id] = group - groups[group_name] = group - elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): - # both are VPC groups, this is ok - group_id = groups[group_name]['GroupId'] - elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): - # both are EC2 classic, this is ok - group_id = groups[group_name]['GroupId'] - else: - auto_group = None - filters = {'group-name': group_name} - if vpc_id: - filters['vpc-id'] = vpc_id - # if we got here, either the target group does not exist, or there - # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC - # is bad, so we have to create a new SG because no compatible group - # exists - if not rule.get('group_desc', '').strip(): - # retry describing the group once - try: - auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] - except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): - module.fail_json(msg="group %s will be automatically created by rule %s but " - "no description was provided" % (group_name, rule)) - except ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - elif not module.check_mode: - params = dict(GroupName=group_name, Description=rule['group_desc']) - if vpc_id: - params['VpcId'] = vpc_id - try: - auto_group = client.create_security_group(aws_retry=True, **params) - get_waiter( - client, 'security_group_exists', - ).wait( - GroupIds=[auto_group['GroupId']], - ) - except is_boto3_error_code('InvalidGroup.Duplicate'): - # The group exists, but didn't show up in any of our describe-security-groups calls - # Try searching on a filter for the name, and allow a retry window for AWS to update - # the model on their end. - try: - auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] - except IndexError: - module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) - except ClientError as e: - module.fail_json_aws( - e, - msg="Could not create or use existing group '{0}' in rule. 
Make sure the group exists".format(group_name)) - if auto_group is not None: - group_id = auto_group['GroupId'] - groups[group_id] = auto_group - groups[group_name] = auto_group - target_group_created = True - return 'group', (owner_id, group_id, None), target_group_created - elif 'cidr_ip' in rule: - return 'ipv4', validate_ip(module, rule['cidr_ip']), False - elif 'cidr_ipv6' in rule: - return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False - elif 'ip_prefix' in rule: - return 'ip_prefix', rule['ip_prefix'], False - - module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) - - -def ports_expand(ports): - # takes a list of ports and returns a list of (port_from, port_to) - ports_expanded = [] - for port in ports: - if not isinstance(port, string_types): - ports_expanded.append((port,) * 2) - elif '-' in port: - ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) - else: - ports_expanded.append((int(port.strip()),) * 2) - - return ports_expanded - - -def rule_expand_ports(rule): - # takes a rule dict and returns a list of expanded rule dicts - # uses icmp_code and icmp_type instead of from_ports and to_ports when - # available. - if 'ports' not in rule: - non_icmp_params = any([ - rule.get('icmp_type', None) is None, rule.get('icmp_code', None) is None]) - conflict = not non_icmp_params and any([ - rule.get('from_port', None), rule.get('to_port', None)]) - - if non_icmp_params: - if isinstance(rule.get('from_port'), string_types): - rule['from_port'] = int(rule.get('from_port')) - if isinstance(rule.get('to_port'), string_types): - rule['to_port'] = int(rule.get('to_port')) - else: - rule['from_port'] = int(rule.get('icmp_type')) if isinstance(rule.get('icmp_type'), string_types) else rule.get('icmp_type') - rule['to_port'] = int(rule.get('icmp_code')) if isinstance(rule.get('icmp_code'), string_types) else rule.get('icmp_code') - # Used temporarily to track the fact that icmp keys were converted - # to from_port/to_port - if not conflict: - rule['icmp_keys'] = True - - return [rule] - - ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + try: + if rule.get("group_id"): + return _target_from_rule_with_group_id(rule, groups) + if "group_name" in rule: + return _target_from_rule_with_group_name(client, rule, name, group, groups, vpc_id, tags, module.check_mode) + if "cidr_ip" in rule: + return "ipv4", validate_ip(module, rule["cidr_ip"]), False + if "cidr_ipv6" in rule: + return "ipv6", validate_ip(module, rule["cidr_ipv6"]), False + if "ip_prefix" in rule: + return "ip_prefix", rule["ip_prefix"], False + except SecurityGroupError as e: + e.fail(module) + + module.fail_json(msg="Could not match target for rule", failed_rule=rule) + + +def _strip_rule(rule): + """ + Returns a copy of the rule with the Target/Source and Port information + from a rule stripped out. 
+ This can then be combined with the expanded information + """ + stripped_rule = deepcopy(rule) + # Get just the non-source/port info from the rule + [stripped_rule.pop(source_type, None) for source_type in SOURCE_TYPES_ALL] + [stripped_rule.pop(port_type, None) for port_type in PORT_TYPES_ALL] + return stripped_rule - rule_expanded = [] - for from_to in ports_expand(ports): - temp_rule = rule.copy() - del temp_rule['ports'] - temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) - rule_expanded.append(temp_rule) - return rule_expanded +def expand_rules(rules): + if rules is None: + return rules + expanded_rules = [] + for rule in rules: + expanded_rules.extend(expand_rule(rule)) -def rules_expand_ports(rules): - # takes a list of rules and expands it based on 'ports' - if not rules: - return rules + return expanded_rules - return [rule for rule_complex in rules - for rule in rule_expand_ports(rule_complex)] +def expand_rule(rule): + rule = scrub_none_parameters(rule) + ports_list = expand_ports_from_rule(rule) + sources_list = expand_sources_from_rule(rule) + stripped_rule = _strip_rule(rule) -def rule_expand_source(rule, source_type): - # takes a rule dict and returns a list of expanded rule dicts for specified source_type - sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] - source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') + # expands out all possible combinations of ports and sources for the rule + # This results in a list of pairs of dictionaries... + ports_and_sources = itertools.product(ports_list, sources_list) - rule_expanded = [] - for source in sources: - temp_rule = rule.copy() - for s in source_types_all: - temp_rule.pop(s, None) - temp_rule[source_type] = source - rule_expanded.append(temp_rule) + # Combines each pair of port/source dictionaries with rest of the info from the rule + return [{**stripped_rule, **port, **source} for (port, source) in ports_and_sources] - return rule_expanded +def expand_sources_from_rule(rule): + sources = [] + for type_name in sorted(SOURCE_TYPES_ALL): + if rule.get(type_name) is not None: + sources.extend([{type_name: target} for target in rule.get(type_name)]) + if not sources: + raise SecurityGroupError("Unable to find source/target information in rule", rule=rule) + return tuple(sources) -def rule_expand_sources(rule): - # takes a rule dict and returns a list of expanded rule dicts - source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule) - return [r for stype in source_types - for r in rule_expand_source(rule, stype)] +def expand_ports_from_rule(rule): + # While icmp_type/icmp_code could have been aliases, this wouldn't be obvious in the + # documentation + if rule.get("icmp_type") is not None: + return ({"from_port": rule.get("icmp_type"), "to_port": rule.get("icmp_code")},) + if rule.get("from_port") is not None or rule.get("to_port") is not None: + return ({"from_port": rule.get("from_port"), "to_port": rule.get("to_port")},) + if rule.get("ports") is not None: + ports = expand_ports_list(rule.get("ports")) + return tuple({"from_port": from_port, "to_port": to_port} for (from_port, to_port) in ports) + return ({},) -def rules_expand_sources(rules): - # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' - if not rules: - return rules +def expand_ports_list(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for 
port in ports: + try: + port_list = (int(port.strip()),) * 2 + except ValueError as e: + # Someone passed a range + if "-" in port: + port_list = [int(p.strip()) for p in port.split("-", 1)] + else: + raise SecurityGroupError("Unable to parse port", port=port) from e + ports_expanded.append(tuple(sorted(port_list))) - return [rule for rule_complex in rules - for rule in rule_expand_sources(rule_complex)] + return ports_expanded def update_rules_description(module, client, rule_type, group_id, ip_permissions): @@ -890,151 +919,166 @@ def update_rules_description(module, client, rule_type, group_id, ip_permissions try: if rule_type == "in": client.update_security_group_rule_descriptions_ingress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions + ) if rule_type == "out": client.update_security_group_rule_descriptions_egress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions + ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) + module.fail_json_aws(e, msg=f"Unable to update rule description for group {group_id}") def fix_port_and_protocol(permission): - for key in ('FromPort', 'ToPort'): + for key in ("FromPort", "ToPort"): if key in permission: if permission[key] is None: del permission[key] else: permission[key] = int(permission[key]) - permission['IpProtocol'] = to_text(permission['IpProtocol']) + permission["IpProtocol"] = to_text(permission["IpProtocol"]) return permission def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id): if revoke_ingress: - revoke(client, module, revoke_ingress, group_id, 'in') + revoke(client, module, revoke_ingress, group_id, "in") if revoke_egress: - revoke(client, module, revoke_egress, group_id, 'out') + revoke(client, module, revoke_egress, group_id, "out") return bool(revoke_ingress or revoke_egress) def revoke(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: - if rule_type == 'in': - client.revoke_security_group_ingress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) - elif rule_type == 'out': - client.revoke_security_group_egress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "in": + client.revoke_security_group_ingress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == "out": + client.revoke_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: - rules = 'ingress rules' if rule_type == 'in' else 'egress rules' - module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) + rules = "ingress rules" if rule_type == "in" else "egress rules" + module.fail_json_aws(e, f"Unable to revoke {rules}: {ip_permissions}") def add_new_permissions(client, module, new_ingress, new_egress, group_id): if new_ingress: - authorize(client, module, new_ingress, group_id, 'in') + authorize(client, module, new_ingress, group_id, "in") if new_egress: - authorize(client, module, new_egress, group_id, 'out') + authorize(client, module, new_egress, group_id, "out") return bool(new_ingress or new_egress) def authorize(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: - if rule_type == 'in': - client.authorize_security_group_ingress( - aws_retry=True, - 
GroupId=group_id, IpPermissions=ip_permissions) - elif rule_type == 'out': - client.authorize_security_group_egress( - aws_retry=True, - GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "in": + client.authorize_security_group_ingress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == "out": + client.authorize_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: - rules = 'ingress rules' if rule_type == 'in' else 'egress rules' - module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) + rules = "ingress rules" if rule_type == "in" else "egress rules" + module.fail_json_aws(e, f"Unable to authorize {rules}: {ip_permissions}") def validate_ip(module, cidr_ip): - split_addr = cidr_ip.split('/') - if len(split_addr) == 2: - # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set - # Get the network bits if IPv4, and validate if IPv6. - try: - ip = to_subnet(split_addr[0], split_addr[1]) - if ip != cidr_ip: - module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format( - cidr_ip, ip)) - except ValueError: - # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here - try: - isinstance(ip_network(to_text(cidr_ip)), IPv6Network) - ip = cidr_ip - except ValueError: - # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError - # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits - ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] - if ip6 != cidr_ip: - module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6)) - return ip6 - return ip - return cidr_ip - - -def update_tags(client, module, group_id, current_tags, tags, purge_tags): - tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) - - if not module.check_mode: - if tags_to_delete: - try: - client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete]) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete)) - - # Add/update tags - if tags_need_modify: - try: - client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify)) - - return bool(tags_need_modify or tags_to_delete) + split_addr = cidr_ip.split("/") + if len(split_addr) != 2: + return cidr_ip + try: + ip = ip_network(to_text(cidr_ip)) + return str(ip) + except ValueError: + # If a host bit is incorrectly set, ip_network will throw an error at us, + # we'll continue, convert the address to a CIDR AWS will accept, but issue a warning. + pass + + # Try evaluating as an IPv4 network, it'll throw a ValueError if it can't parse cidr_ip as an + # IPv4 network + try: + ip = to_subnet(split_addr[0], split_addr[1]) + module.warn( + f"One of your CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the network" + f" mask and make sure that only network bits are set: {ip}." 
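+ # illustrative: validate_ip(module, "10.1.2.3/8") emits this warning and returns "10.0.0.0/8"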
+ ) + return ip + except ValueError: + pass -def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list): + # Try again, evaluating as an IPv6 network. + try: + ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] + module.warn( + f"One of your IPv6 CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {ip6}." + ) + return ip6 + except ValueError: + module.warn(f"Unable to parse CIDR ({cidr_ip}).") + return cidr_ip + + +def update_rule_descriptions( + module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list +): changed = False ingress_needs_desc_update = [] egress_needs_desc_update = [] for present_rule in present_egress: - needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + needs_update = [ + r + for r in named_tuple_egress_list + if rule_cmp(r, present_rule) and r.description != present_rule.description + ] for r in needs_update: named_tuple_egress_list.remove(r) egress_needs_desc_update.extend(needs_update) for present_rule in present_ingress: - needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + needs_update = [ + r + for r in named_tuple_ingress_list + if rule_cmp(r, present_rule) and r.description != present_rule.description + ] for r in needs_update: named_tuple_ingress_list.remove(r) ingress_needs_desc_update.extend(needs_update) if ingress_needs_desc_update: - update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update)) + update_rules_description(module, client, "in", group_id, rules_to_permissions(ingress_needs_desc_update)) changed |= True if egress_needs_desc_update: - update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update)) + update_rules_description(module, client, "out", group_id, rules_to_permissions(egress_needs_desc_update)) changed |= True return changed -def create_security_group(client, module, name, description, vpc_id): +def _create_security_group_with_wait(client, name, description, vpc_id, tags): + params = dict(GroupName=name, Description=description) + if vpc_id: + params["VpcId"] = vpc_id + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, ["security-group"]) + + created_group = client.create_security_group(aws_retry=True, **params) + get_waiter( + client, + "security_group_exists", + ).wait( + GroupIds=[created_group["GroupId"]], + ) + return created_group + + +def create_security_group(client, module, name, description, vpc_id, tags): if not module.check_mode: params = dict(GroupName=name, Description=description) if vpc_id: - params['VpcId'] = vpc_id + params["VpcId"] = vpc_id + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, ["security-group"]) try: group = client.create_security_group(aws_retry=True, **params) except (BotoCoreError, ClientError) as e: @@ -1046,8 +1090,8 @@ def create_security_group(client, module, name, description, vpc_id): # amazon sometimes takes a couple seconds to update the security group so wait till it exists while True: sleep(3) - group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - if group.get('VpcId') and not group.get('IpPermissionsEgress'): + group = 
get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + if group.get("VpcId") and not group.get("IpPermissionsEgress"): pass else: break @@ -1056,7 +1100,7 @@ def create_security_group(client, module, name, description, vpc_id): def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress): - group_id = group['GroupId'] + group_id = group["GroupId"] tries = 6 def await_rules(group, desired_rules, purge, rule_key): @@ -1076,39 +1120,47 @@ def wait_for_rule_propagation(module, client, group, desired_ingress, desired_eg elif current_rules.issuperset(desired_rules) and not purge: return group sleep(10) - group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0] - module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules)) + group = get_security_groups_with_backoff(client, GroupIds=[group_id])["SecurityGroups"][0] + module.warn( + f"Ran out of time waiting for {group_id} {rule_key}. Current: {current_rules}, Desired: {desired_rules}" + ) return group - group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0] - if 'VpcId' in group and module.params.get('rules_egress') is not None: - group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress') - return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions') + group = get_security_groups_with_backoff(client, GroupIds=[group_id])["SecurityGroups"][0] + if "VpcId" in group and module.params.get("rules_egress") is not None: + group = await_rules(group, desired_egress, purge_egress, "IpPermissionsEgress") + return await_rules(group, desired_ingress, purge_ingress, "IpPermissions") def group_exists(client, module, vpc_id, group_id, name): - params = {'Filters': []} + filters = dict() + params = dict() if group_id: - params['GroupIds'] = [group_id] + if isinstance(group_id, list): + params["GroupIds"] = group_id + else: + params["GroupIds"] = [group_id] if name: # Add name to filters rather than params['GroupNames'] # because params['GroupNames'] only checks the default vpc if no vpc is provided - params['Filters'].append({'Name': 'group-name', 'Values': [name]}) + filters["group-name"] = name if vpc_id: - params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]}) + filters["vpc-id"] = vpc_id # Don't filter by description to maintain backwards compatibility - + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: - security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', []) - all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', []) + security_groups = sg_exists_with_backoff(client, **params).get("SecurityGroups", []) + all_groups = get_security_groups_with_backoff(client).get("SecurityGroups", []) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Error in describe_security_groups") if security_groups: - groups = dict((group['GroupId'], group) for group in all_groups) - groups.update(dict((group['GroupName'], group) for group in all_groups)) + groups = dict((group["GroupId"], group) for group in all_groups) + groups.update(dict((group["GroupName"], group) for group in all_groups)) if vpc_id: - vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id) + vpc_wins = dict( + (group["GroupName"], group) for group in 
all_groups if group.get("VpcId") and group["VpcId"] == vpc_id + ) groups.update(vpc_wins) # maintain backwards compatibility by using the last matching group return security_groups[-1], groups @@ -1118,9 +1170,9 @@ def group_exists(client, module, vpc_id, group_id, name): def get_diff_final_resource(client, module, security_group): def get_account_id(security_group, module): try: - owner_id = security_group.get('owner_id', current_account_id) + owner_id = security_group.get("owner_id", current_account_id) except (BotoCoreError, ClientError) as e: - owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + owner_id = f"Unable to determine owner_id: {to_text(e)}" return owner_id def get_final_tags(security_group_tags, specified_tags, purge_tags): @@ -1142,88 +1194,108 @@ def get_diff_final_resource(client, module, security_group): specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) for rule in specified_rules: format_rule = { - 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), - 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + "from_port": None, + "to_port": None, + "ip_protocol": rule.get("proto"), + "ip_ranges": [], + "ipv6_ranges": [], + "prefix_list_ids": [], + "user_id_group_pairs": [], } - if rule.get('proto', 'tcp') in ('all', '-1', -1): - format_rule['ip_protocol'] = '-1' - format_rule.pop('from_port') - format_rule.pop('to_port') - elif rule.get('ports'): - if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): - rule['ports'] = [rule['ports']] - for port in rule.get('ports'): - if isinstance(port, string_types) and '-' in port: - format_rule['from_port'], format_rule['to_port'] = port.split('-') + if rule.get("proto") in ("all", "-1", -1): + format_rule["ip_protocol"] = "-1" + format_rule.pop("from_port") + format_rule.pop("to_port") + elif rule.get("ports"): + if rule.get("ports") and (isinstance(rule["ports"], string_types) or isinstance(rule["ports"], int)): + rule["ports"] = [rule["ports"]] + for port in rule.get("ports"): + if isinstance(port, string_types) and "-" in port: + format_rule["from_port"], format_rule["to_port"] = port.split("-") else: - format_rule['from_port'] = format_rule['to_port'] = port - elif rule.get('from_port') or rule.get('to_port'): - format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) - format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) - for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + format_rule["from_port"] = format_rule["to_port"] = port + elif rule.get("from_port") or rule.get("to_port"): + format_rule["from_port"] = rule.get("from_port", rule.get("to_port")) + format_rule["to_port"] = rule.get("to_port", rule.get("from_port")) + for source_type in ("cidr_ip", "cidr_ipv6", "prefix_list_id"): if rule.get(source_type): - rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) - if rule.get('rule_desc'): - format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + rule_key = { + "cidr_ip": "ip_ranges", + "cidr_ipv6": "ipv6_ranges", + "prefix_list_id": "prefix_list_ids", + }.get(source_type) + if rule.get("rule_desc"): + format_rule[rule_key] = [{source_type: rule[source_type], "description": rule["rule_desc"]}] else: if not isinstance(rule[source_type], list): rule[source_type] = [rule[source_type]] format_rule[rule_key] = [{source_type: target} for target in 
rule[source_type]] - if rule.get('group_id') or rule.get('group_name'): - rule_sg = group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0] + if rule.get("group_id") or rule.get("group_name"): + # XXX bug - doesn't cope with a list of ids/names + rule_sg = group_exists( + client, module, module.params["vpc_id"], rule.get("group_id"), rule.get("group_name") + )[0] if rule_sg is None: # --diff during --check - format_rule['user_id_group_pairs'] = [{ - 'group_id': rule.get('group_id'), - 'group_name': rule.get('group_name'), - 'peering_status': None, - 'user_id': get_account_id(security_group, module), - 'vpc_id': module.params['vpc_id'], - 'vpc_peering_connection_id': None - }] + format_rule["user_id_group_pairs"] = [ + { + "group_id": rule.get("group_id"), + "group_name": rule.get("group_name"), + "peering_status": None, + "user_id": get_account_id(security_group, module), + "vpc_id": module.params["vpc_id"], + "vpc_peering_connection_id": None, + } + ] else: rule_sg = camel_dict_to_snake_dict(rule_sg) - format_rule['user_id_group_pairs'] = [{ - 'description': rule_sg.get('description', rule_sg.get('group_desc')), - 'group_id': rule_sg.get('group_id', rule.get('group_id')), - 'group_name': rule_sg.get('group_name', rule.get('group_name')), - 'peering_status': rule_sg.get('peering_status'), - 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), - 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), - 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') - }] - for k, v in list(format_rule['user_id_group_pairs'][0].items()): + format_rule["user_id_group_pairs"] = [ + { + "description": rule_sg.get("description", rule_sg.get("group_desc")), + "group_id": rule_sg.get("group_id", rule.get("group_id")), + "group_name": rule_sg.get("group_name", rule.get("group_name")), + "peering_status": rule_sg.get("peering_status"), + "user_id": rule_sg.get("user_id", get_account_id(security_group, module)), + "vpc_id": rule_sg.get("vpc_id", module.params["vpc_id"]), + "vpc_peering_connection_id": rule_sg.get("vpc_peering_connection_id"), + } + ] + for k, v in list(format_rule["user_id_group_pairs"][0].items()): if v is None: - format_rule['user_id_group_pairs'][0].pop(k) + format_rule["user_id_group_pairs"][0].pop(k) final_rules.append(format_rule) - # Order final rules consistently - final_rules.sort(key=get_ip_permissions_sort_key) return final_rules - security_group_ingress = security_group.get('ip_permissions', []) - specified_ingress = module.params['rules'] - purge_ingress = module.params['purge_rules'] - security_group_egress = security_group.get('ip_permissions_egress', []) - specified_egress = module.params['rules_egress'] - purge_egress = module.params['purge_rules_egress'] + security_group_ingress = security_group.get("ip_permissions", []) + specified_ingress = module.params["rules"] + purge_ingress = module.params["purge_rules"] + security_group_egress = security_group.get("ip_permissions_egress", []) + specified_egress = module.params["rules_egress"] + purge_egress = module.params["purge_rules_egress"] return { - 'description': module.params['description'], - 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), - 'group_name': security_group.get('group_name', module.params['name']), - 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), - 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, 
purge_egress), - 'owner_id': get_account_id(security_group, module), - 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), - 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + "description": module.params["description"], + "group_id": security_group.get("group_id", "sg-xxxxxxxx"), + "group_name": security_group.get("group_name", module.params["name"]), + "ip_permissions": get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + "ip_permissions_egress": get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + "owner_id": get_account_id(security_group, module), + "tags": get_final_tags(security_group.get("tags", {}), module.params["tags"], module.params["purge_tags"]), + "vpc_id": security_group.get("vpc_id", module.params["vpc_id"]), + } def flatten_nested_targets(module, rules): def _flatten(targets): for target in targets: if isinstance(target, list): - module.deprecate('Support for nested lists in cidr_ip and cidr_ipv6 has been ' - 'deprecated. The flatten filter can be used instead.', - date='2024-12-01', collection_name='amazon.aws') + module.deprecate( + ( + "Support for nested lists in cidr_ip and cidr_ipv6 has been " + "deprecated. The flatten filter can be used instead." + ), + date="2024-12-01", + collection_name="amazon.aws", + ) for t in _flatten(target): yield t elif isinstance(target, string_types): @@ -1232,86 +1304,345 @@ def flatten_nested_targets(module, rules): if rules is not None: for rule in rules: target_list_type = None - if isinstance(rule.get('cidr_ip'), list): - target_list_type = 'cidr_ip' - elif isinstance(rule.get('cidr_ipv6'), list): - target_list_type = 'cidr_ipv6' + if isinstance(rule.get("cidr_ip"), list): + target_list_type = "cidr_ip" + elif isinstance(rule.get("cidr_ipv6"), list): + target_list_type = "cidr_ipv6" if target_list_type is not None: rule[target_list_type] = list(_flatten(rule[target_list_type])) return rules def get_rule_sort_key(dicts): - if dicts.get('cidr_ip'): - return dicts.get('cidr_ip') - elif dicts.get('cidr_ipv6'): - return dicts.get('cidr_ipv6') - elif dicts.get('prefix_list_id'): - return dicts.get('prefix_list_id') - elif dicts.get('group_id'): - return dicts.get('group_id') + if dicts.get("cidr_ip"): + return str(dicts.get("cidr_ip")) + if dicts.get("cidr_ipv6"): + return str(dicts.get("cidr_ipv6")) + if dicts.get("prefix_list_id"): + return str(dicts.get("prefix_list_id")) + if dicts.get("group_id"): + return str(dicts.get("group_id")) return None def get_ip_permissions_sort_key(rule): - if rule.get('ip_ranges'): - rule.get('ip_ranges').sort(key=get_rule_sort_key) - return rule.get('ip_ranges')[0]['cidr_ip'] - elif rule.get('ipv6_ranges'): - rule.get('ipv6_ranges').sort(key=get_rule_sort_key) - return rule.get('ipv6_ranges')[0]['cidr_ipv6'] - elif rule.get('prefix_list_ids'): - rule.get('prefix_list_ids').sort(key=get_rule_sort_key) - return rule.get('prefix_list_ids')[0]['prefix_list_id'] - elif rule.get('user_id_group_pairs'): - rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) - return rule.get('user_id_group_pairs')[0].get('group_id', '') + RULE_KEYS_ALL = {"ip_ranges", "ipv6_ranges", "prefix_list_ids", "user_id_group_pairs"} + # Ensure content of these keys is sorted + for rule_key in RULE_KEYS_ALL: + if rule.get(rule_key): + rule.get(rule_key).sort(key=get_rule_sort_key) + + # Returns the first value plus a prefix so the types get clustered together when sorted + if 
rule.get("ip_ranges"): + value = str(rule.get("ip_ranges")[0]["cidr_ip"]) + return f"ipv4:{value}" + if rule.get("ipv6_ranges"): + value = str(rule.get("ipv6_ranges")[0]["cidr_ipv6"]) + return f"ipv6:{value}" + if rule.get("prefix_list_ids"): + value = str(rule.get("prefix_list_ids")[0]["prefix_list_id"]) + return f"pl:{value}" + if rule.get("user_id_group_pairs"): + value = str(rule.get("user_id_group_pairs")[0].get("group_id", "")) + return f"ugid:{value}" return None +def sort_security_group(security_group): + if not security_group: + return security_group + + if security_group.get("ip_permissions"): + security_group["ip_permissions"].sort(key=get_ip_permissions_sort_key) + if security_group.get("ip_permissions_egress"): + security_group["ip_permissions_egress"].sort(key=get_ip_permissions_sort_key) + + return security_group + + +def validate_rules(module, rules): + if not rules: + return + try: + for rule in rules: + validate_rule(rule) + except SecurityGroupError as e: + e.fail(module) + + +def ensure_absent(client, group, check_mode): + if not group: + return False + if check_mode: + return True + + try: + client.delete_security_group(aws_retry=True, GroupId=group["GroupId"]) + except is_boto3_error_code("InvalidGroup.NotFound"): + return False + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise SecurityGroupError(f"Unable to delete security group '{group}'", e=e) + + return True + + +def ensure_present(module, client, group, groups): + name = module.params["name"] + group_id = module.params["group_id"] + description = module.params["description"] + vpc_id = module.params["vpc_id"] + # Deprecated + rules = flatten_nested_targets(module, deepcopy(module.params["rules"])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params["rules_egress"])) + # /end Deprecated + validate_rules(module, rules) + validate_rules(module, rules_egress) + rules = deduplicate_rules_args(expand_rules(rules)) + rules_egress = deduplicate_rules_args(expand_rules(rules_egress)) + state = module.params.get("state") + purge_rules = module.params["purge_rules"] + purge_rules_egress = module.params["purge_rules_egress"] + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + + changed = False + group_created_new = False + + if not group: + # Short circuit things if we're in check_mode + if module.check_mode: + return True, None + + group = create_security_group(client, module, name, description, vpc_id, tags) + group_created_new = True + changed = True + + else: + # Description is immutable + if group["Description"] != description: + module.warn( + "Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. Try using state=absent to delete, then rerunning this task." 
+ ) + + changed |= ensure_ec2_tags(client, module, group["GroupId"], tags=tags, purge_tags=purge_tags) + + named_tuple_ingress_list = [] + named_tuple_egress_list = [] + current_ingress = sum([list(rule_from_group_permission(p)) for p in group["IpPermissions"]], []) + current_egress = sum([list(rule_from_group_permission(p)) for p in group["IpPermissionsEgress"]], []) + + for new_rules, _rule_type, named_tuple_rule_list in [ + (rules, "in", named_tuple_ingress_list), + (rules_egress, "out", named_tuple_egress_list), + ]: + if new_rules is None: + continue + for rule in new_rules: + target_type, target, target_group_created = get_target_from_rule( + module, client, rule, name, group, groups, vpc_id, tags + ) + changed |= target_group_created + + if rule.get("proto") in ("all", "-1", -1): + rule["proto"] = "-1" + rule["from_port"] = None + rule["to_port"] = None + + try: + int(rule.get("proto")) + rule["proto"] = to_text(rule.get("proto")) + rule["from_port"] = None + rule["to_port"] = None + except ValueError: + # rule does not use numeric protocol spec + pass + named_tuple_rule_list.append( + Rule( + port_range=(rule["from_port"], rule["to_port"]), + protocol=to_text(rule.get("proto")), + target=target, + target_type=target_type, + description=rule.get("rule_desc"), + ) + ) + + # List comprehensions for rules to add, rules to modify, and rule ids to determine purging + new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] + new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))] + + if module.params.get("rules_egress") is None and "VpcId" in group: + # when no egress rules are specified and we're in a VPC, + # we add in a default allow all out rule, which was the + # default behavior before egress rules were added + rule = Rule((None, None), "-1", "0.0.0.0/0", "ipv4", None) + if rule in current_egress: + named_tuple_egress_list.append(rule) + if rule not in current_egress: + current_egress.append(rule) + + # List comprehensions for rules to add, rules to modify, and rule ids to determine purging + present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress))) + present_egress = list(set(named_tuple_egress_list).union(set(current_egress))) + + if purge_rules: + revoke_ingress = [] + for p in present_ingress: + if not any(rule_cmp(p, b) for b in named_tuple_ingress_list): + revoke_ingress.append(to_permission(p)) + else: + revoke_ingress = [] + + if purge_rules_egress and module.params.get("rules_egress") is not None: + revoke_egress = [] + for p in present_egress: + if not any(rule_cmp(p, b) for b in named_tuple_egress_list): + revoke_egress.append(to_permission(p)) + else: + revoke_egress = [] + + # named_tuple_ingress_list and named_tuple_egress_list get updated by + # method update_rule_descriptions, deep copy these two lists to new + # variables for the record of the 'desired' ingress and egress sg permissions + desired_ingress = deepcopy(named_tuple_ingress_list) + desired_egress = deepcopy(named_tuple_egress_list) + + changed |= update_rule_descriptions( + module, + client, + group["GroupId"], + present_ingress, + named_tuple_ingress_list, + present_egress, + named_tuple_egress_list, + ) + + # Revoke old rules + changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group["GroupId"]) + + new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] + new_ingress_permissions = 
rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress)) + new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress)) + # Authorize new rules + changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group["GroupId"]) + + if group_created_new and module.params.get("rules") is None and module.params.get("rules_egress") is None: + # A new group with no rules provided is already being awaited. + # When it is created we wait for the default egress rule to be added by AWS + security_group = get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + elif changed and not module.check_mode: + # keep pulling until current security group rules match the desired ingress and egress rules + security_group = wait_for_rule_propagation( + module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress + ) + else: + security_group = get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + security_group = camel_dict_to_snake_dict(security_group, ignore_list=["Tags"]) + security_group["tags"] = boto3_tag_list_to_ansible_dict(security_group.get("tags", [])) + + return changed, security_group + + def main(): + rule_spec = dict( + rule_desc=dict(type="str"), + # We have historically allowed for lists of lists in cidr_ip and cidr_ipv6 + # https://github.com/ansible-collections/amazon.aws/pull/1213 + cidr_ip=dict(type="list", elements="raw"), + cidr_ipv6=dict(type="list", elements="raw"), + ip_prefix=dict(type="list", elements="str"), + group_id=dict(type="list", elements="str"), + group_name=dict(type="list", elements="str"), + group_desc=dict(type="str"), + proto=dict(type="str", default="tcp"), + ports=dict(type="list", elements="str"), + from_port=dict(type="int"), + to_port=dict(type="int"), + icmp_type=dict(type="int"), + icmp_code=dict(type="int"), + ) + rule_requirements = dict( + mutually_exclusive=( + # PORTS / ICMP_TYPE + ICMP_CODE / TO_PORT + FROM_PORT + ( + "ports", + "to_port", + ), + ( + "ports", + "from_port", + ), + ( + "ports", + "icmp_type", + ), + ( + "ports", + "icmp_code", + ), + ( + "icmp_type", + "to_port", + ), + ( + "icmp_code", + "to_port", + ), + ( + "icmp_type", + "from_port", + ), + ( + "icmp_code", + "from_port", + ), + ), + required_one_of=( + # A target must be specified + ( + "group_id", + "group_name", + "cidr_ip", + "cidr_ipv6", + "ip_prefix", + ), + ), + required_by=dict( + # If you specify an ICMP code, you must specify the ICMP type + icmp_code=("icmp_type",), + ), + ) + argument_spec = dict( name=dict(), group_id=dict(), description=dict(), vpc_id=dict(), - rules=dict(type='list', elements='dict'), - rules_egress=dict(type='list', elements='dict', aliases=['egress_rules']), - state=dict(default='present', type='str', choices=['present', 'absent']), - purge_rules=dict(default=True, required=False, type='bool'), - purge_rules_egress=dict(default=True, required=False, type='bool', aliases=['purge_egress_rules']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, required=False, type='bool') + rules=dict(type="list", elements="dict", options=rule_spec, **rule_requirements), + rules_egress=dict( + type="list", elements="dict", aliases=["egress_rules"], options=rule_spec, **rule_requirements + ), + state=dict(default="present", type="str", choices=["present", "absent"]), + purge_rules=dict(default=True, required=False, type="bool"), + 
purge_rules_egress=dict(default=True, required=False, type="bool", aliases=["purge_egress_rules"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, required=False, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[['name', 'group_id']], - required_if=[['state', 'present', ['name']]], + required_one_of=[["name", "group_id"]], + required_if=[["state", "present", ["name", "description"]]], ) - name = module.params['name'] - group_id = module.params['group_id'] - description = module.params['description'] - vpc_id = module.params['vpc_id'] - rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) - rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) - rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) - rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) - state = module.params.get('state') - purge_rules = module.params['purge_rules'] - purge_rules_egress = module.params['purge_rules_egress'] - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - - if state == 'present' and not description: - module.fail_json(msg='Must provide description when state is present.') + name = module.params["name"] + group_id = module.params["group_id"] + vpc_id = module.params["vpc_id"] + state = module.params.get("state") - changed = False - client = module.client('ec2', AWSRetry.jittered_backoff()) + client = module.client("ec2", AWSRetry.jittered_backoff()) group, groups = group_exists(client, module, vpc_id, group_id, name) - group_created_new = not bool(group) global current_account_id current_account_id = get_aws_account_id(module) @@ -1319,165 +1650,36 @@ def main(): before = {} after = {} - # Ensure requested group is absent - if state == 'absent': - if group: - # found a match, delete it - before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) - before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) - try: - if not module.check_mode: - client.delete_security_group(aws_retry=True, GroupId=group['GroupId']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) - else: - group = None - changed = True - else: - # no match found, no changes required - pass - - # Ensure requested group is present - elif state == 'present': - if group: - # existing group - before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) - before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) - if group['Description'] != description: - module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " - "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.") - else: - # no match found, create it - group = create_security_group(client, module, name, description, vpc_id) - changed = True - - if tags is not None and group is not None: - current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', [])) - changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags) - if group: - named_tuple_ingress_list = [] - named_tuple_egress_list = [] - current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], []) - current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], []) - - for new_rules, _rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list), - (rules_egress, 'out', named_tuple_egress_list)]: - if new_rules is None: - continue - for rule in new_rules: - target_type, target, target_group_created = get_target_from_rule( - module, client, rule, name, group, groups, vpc_id) - changed |= target_group_created - - rule.pop('icmp_type', None) - rule.pop('icmp_code', None) - rule.pop('icmp_keys', None) - - if rule.get('proto', 'tcp') in ('all', '-1', -1): - rule['proto'] = '-1' - rule['from_port'] = None - rule['to_port'] = None - - try: - int(rule.get('proto', 'tcp')) - rule['proto'] = to_text(rule.get('proto', 'tcp')) - rule['from_port'] = None - rule['to_port'] = None - except ValueError: - # rule does not use numeric protocol spec - pass - named_tuple_rule_list.append( - Rule( - port_range=(rule['from_port'], rule['to_port']), - protocol=to_text(rule.get('proto', 'tcp')), - target=target, target_type=target_type, - description=rule.get('rule_desc'), - ) - ) + before = camel_dict_to_snake_dict(group, ignore_list=["Tags"]) + before["tags"] = boto3_tag_list_to_ansible_dict(before.get("tags", [])) - # List comprehensions for rules to add, rules to modify, and rule ids to determine purging - new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] - new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))] - - if module.params.get('rules_egress') is None and 'VpcId' in group: - # when no egress rules are specified and we're in a VPC, - # we add in a default allow all out rule, which was the - # default behavior before egress rules were added - rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) - if rule in current_egress: - named_tuple_egress_list.append(rule) - if rule not in current_egress: - current_egress.append(rule) - - # List comprehensions for rules to add, rules to modify, and rule ids to determine purging - present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress))) - present_egress = list(set(named_tuple_egress_list).union(set(current_egress))) - - if purge_rules: - revoke_ingress = [] - for p in present_ingress: - if not any(rule_cmp(p, b) for b in named_tuple_ingress_list): - revoke_ingress.append(to_permission(p)) - else: - revoke_ingress = [] - if purge_rules_egress and module.params.get('rules_egress') is not None: - if module.params.get('rules_egress') is []: - revoke_egress = [ - to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list) - if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) - ] - else: - revoke_egress = [] - for p in present_egress: - if not any(rule_cmp(p, b) for b in named_tuple_egress_list): - revoke_egress.append(to_permission(p)) - else: - revoke_egress = [] - - # 
named_tuple_ingress_list and named_tuple_egress_list get updated by - # method update_rule_descriptions, deep copy these two lists to new - # variables for the record of the 'desired' ingress and egress sg permissions - desired_ingress = deepcopy(named_tuple_ingress_list) - desired_egress = deepcopy(named_tuple_egress_list) - - changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress, - named_tuple_ingress_list, present_egress, named_tuple_egress_list) - - # Revoke old rules - changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId']) - - new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] - new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress)) - new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress)) - # Authorize new rules - changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId']) - - if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None: - # A new group with no rules provided is already being awaited. - # When it is created we wait for the default egress rule to be added by AWS - security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - elif changed and not module.check_mode: - # keep pulling until current security group rules match the desired ingress and egress rules - security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress) - else: - security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags']) - security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', [])) - - else: - security_group = {'group_id': None} + try: + # Ensure requested group is absent + if state == "absent": + changed = ensure_absent(client, group, module.check_mode) + security_group = {"group_id": None} + # Ensure requested group is present + elif state == "present": + (changed, security_group) = ensure_present(module, client, group, groups) + # Check mode can't create anything + if not security_group: + security_group = {"group_id": None} + except SecurityGroupError as e: + e.fail(module) if module._diff: - if module.params['state'] == 'present': + if state == "present": after = get_diff_final_resource(client, module, security_group) - if before.get('ip_permissions'): - before['ip_permissions'].sort(key=get_ip_permissions_sort_key) - security_group['diff'] = [{'before': before, 'after': after}] + # Order final rules consistently + before = sort_security_group(before) + after = sort_security_group(after) + + security_group["diff"] = [{"before": before, "after": after}] module.exit_json(changed=changed, **security_group) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py index 3440f90e8..8b7a04ba1 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_security_group_info version_added: 1.0.0 @@ -32,13 +30,12 @@ notes: change. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all security groups @@ -83,9 +80,9 @@ EXAMPLES = ''' - amazon.aws.ec2_security_group_info: filters: "tag:Name": Example -''' +""" -RETURN = ''' +RETURN = r""" security_groups: description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group. type: list @@ -248,29 +245,28 @@ security_groups: "vpc_id": "vpc-0bc3bb03f97405435" } ] -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def main(): - argument_spec = dict( - filters=dict(default={}, type='dict') - ) + argument_spec = dict(filters=dict(default={}, type="dict")) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', AWSRetry.jittered_backoff()) + connection = module.client("ec2", AWSRetry.jittered_backoff()) # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags filters = module.params.get("filters") @@ -284,22 +280,23 @@ def main(): try: security_groups = connection.describe_security_groups( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to describe security groups') + module.fail_json_aws(e, msg="Failed to describe security groups") snaked_security_groups = [] - for security_group in security_groups['SecurityGroups']: + for security_group in security_groups["SecurityGroups"]: # Modify boto3 tags list to be ansible friendly dict # but don't camel case tags security_group = camel_dict_to_snake_dict(security_group) - security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value') + security_group["tags"] = boto3_tag_list_to_ansible_dict( + 
security_group.get("tags", {}), tag_name_key_name="key", tag_value_key_name="value" + ) snaked_security_groups.append(security_group) module.exit_json(security_groups=snaked_security_groups) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py index 62952cf32..1ca33b039 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_snapshot version_added: 1.0.0 @@ -72,14 +70,47 @@ options: required: false default: 0 type: int + modify_create_vol_permission: + description: + - If set to C(true), ec2 snapshot's createVolumePermissions can be modified. + required: false + type: bool + version_added: 6.1.0 + purge_create_vol_permission: + description: + - Whether unspecified group names or user IDs should be removed from the snapshot createVolumePermission. + - Must set I(modify_create_vol_permission) to C(True) for when I(purge_create_vol_permission) is set to C(True). + required: False + type: bool + default: False + version_added: 6.1.0 + group_names: + description: + - The group to be added or removed. The possible value is C(all). + - Mutually exclusive with I(user_ids). + required: false + type: list + elements: str + choices: ["all"] + version_added: 6.1.0 + user_ids: + description: + - The account user IDs to be added or removed. + - If createVolumePermission on snapshot is currently set to Public i.e. I(group_names=all), + providing I(user_ids) will not make createVolumePermission Private unless I(create_volume_permission) is set to C(true). + - Mutually exclusive with I(group_names). 
+        required: false
+        type: list
+        elements: str
+        version_added: 6.1.0
 author: "Will Thames (@willthames)"
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

-EXAMPLES = '''
+EXAMPLES = r"""
 # Simple snapshot of volume using volume_id
 - amazon.aws.ec2_snapshot:
     volume_id: vol-abcdef12
@@ -96,8 +127,8 @@ EXAMPLES = '''
     instance_id: i-12345678
     device_name: /dev/sdb1
     snapshot_tags:
-        frequency: hourly
-        source: /data
+      frequency: hourly
+      source: /data

 # Remove a snapshot
 - amazon.aws.ec2_snapshot:
@@ -108,9 +139,47 @@
 - amazon.aws.ec2_snapshot:
     volume_id: vol-abcdef12
     last_snapshot_min_age: 60
-'''

-RETURN = '''
+- name: Reset snapshot createVolumePermission (change permission to "Private")
+  amazon.aws.ec2_snapshot:
+    snapshot_id: snap-06a6f641234567890
+    modify_create_vol_permission: true
+    purge_create_vol_permission: true
+
+- name: Modify snapshot createVolumePermission to add user IDs (specify purge_create_vol_permission=true to change permission to "Private")
+  amazon.aws.ec2_snapshot:
+    snapshot_id: snap-06a6f641234567890
+    modify_create_vol_permission: true
+    user_ids:
+      - '123456789012'
+      - '098765432109'
+
+- name: Modify snapshot createVolumePermission - remove all except specified user_ids
+  amazon.aws.ec2_snapshot:
+    snapshot_id: snap-06a6f641234567890
+    modify_create_vol_permission: true
+    purge_create_vol_permission: true
+    user_ids:
+      - '123456789012'
+
+- name: Replace (purge existing) snapshot createVolumePermission and add user IDs
+  amazon.aws.ec2_snapshot:
+    snapshot_id: snap-06a6f641234567890
+    modify_create_vol_permission: true
+    purge_create_vol_permission: true
+    user_ids:
+      - '111111111111'
+
+- name: Modify snapshot createVolumePermission - make createVolumePermission "Public"
+  amazon.aws.ec2_snapshot:
+    snapshot_id: snap-06a6f641234567890
+    modify_create_vol_permission: true
+    purge_create_vol_permission: true
+    group_names:
+      - all
+"""
+
+RETURN = r"""
 snapshot_id:
     description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
type: str @@ -131,7 +200,7 @@ volume_size: type: int returned: always sample: 8 -''' +""" import datetime @@ -142,12 +211,12 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -166,8 +235,8 @@ def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): if not now: now = datetime.datetime.now(datetime.timezone.utc) - youngest_snapshot = max(snapshots, key=lambda s: s['StartTime']) - snapshot_start = youngest_snapshot['StartTime'] + youngest_snapshot = max(snapshots, key=lambda s: s["StartTime"]) + snapshot_start = youngest_snapshot["StartTime"] snapshot_age = now - snapshot_start if max_snapshot_age_secs is not None: @@ -179,23 +248,13 @@ def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): def get_volume_by_instance(module, ec2, device_name, instance_id): try: - _filter = { - 'attachment.instance-id': instance_id, - 'attachment.device': device_name - } - volumes = ec2.describe_volumes( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(_filter) - )['Volumes'] + _filter = {"attachment.instance-id": instance_id, "attachment.device": device_name} + volumes = ec2.describe_volumes(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(_filter))["Volumes"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe Volume") if not volumes: - module.fail_json( - msg="Could not find volume with name {0} attached to instance {1}".format( - device_name, instance_id - ) - ) + module.fail_json(msg=f"Could not find volume with name {device_name} attached to instance {instance_id}") volume = volumes[0] return volume @@ -206,14 +265,12 @@ def get_volume_by_id(module, ec2, volume): volumes = ec2.describe_volumes( aws_retry=True, VolumeIds=[volume], - )['Volumes'] + )["Volumes"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe Volume") if not volumes: - module.fail_json( - msg="Could not find volume with id {0}".format(volume) - ) + module.fail_json(msg=f"Could not find volume with id {volume}") volume = volumes[0] return volume @@ -221,103 +278,105 @@ def get_volume_by_id(module, ec2, volume): @AWSRetry.jittered_backoff() def 
_describe_snapshots(ec2, **params): - paginator = ec2.get_paginator('describe_snapshots') + paginator = ec2.get_paginator("describe_snapshots") return paginator.paginate(**params).build_full_result() # Handle SnapshotCreationPerVolumeRateExceeded separately because we need a much # longer delay than normal -@AWSRetry.jittered_backoff(catch_extra_error_codes=['SnapshotCreationPerVolumeRateExceeded'], delay=15) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["SnapshotCreationPerVolumeRateExceeded"], delay=15) def _create_snapshot(ec2, **params): # Fast retry on common failures ('global' rate limits) return ec2.create_snapshot(aws_retry=True, **params) def get_snapshots_by_volume(module, ec2, volume_id): - _filter = {'volume-id': volume_id} + _filter = {"volume-id": volume_id} try: - results = _describe_snapshots( - ec2, - Filters=ansible_dict_to_boto3_filter_list(_filter) - ) + results = _describe_snapshots(ec2, Filters=ansible_dict_to_boto3_filter_list(_filter)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe snapshots from volume") - return results['Snapshots'] - - -def create_snapshot(module, ec2, description=None, wait=None, - wait_timeout=None, volume_id=None, instance_id=None, - snapshot_id=None, device_name=None, snapshot_tags=None, - last_snapshot_min_age=None): + return results["Snapshots"] + + +def create_snapshot( + module, + ec2, + description=None, + wait=None, + wait_timeout=None, + volume_id=None, + instance_id=None, + snapshot_id=None, + device_name=None, + snapshot_tags=None, + last_snapshot_min_age=None, +): snapshot = None changed = False if instance_id: - volume = get_volume_by_instance( - module, ec2, device_name, instance_id - ) - volume_id = volume['VolumeId'] + volume = get_volume_by_instance(module, ec2, device_name, instance_id) + volume_id = volume["VolumeId"] else: volume = get_volume_by_id(module, ec2, volume_id) - if 'Tags' not in volume: - volume['Tags'] = {} + if "Tags" not in volume: + volume["Tags"] = {} if last_snapshot_min_age > 0: current_snapshots = get_snapshots_by_volume(module, ec2, volume_id) last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds - snapshot = _get_most_recent_snapshot( - current_snapshots, - max_snapshot_age_secs=last_snapshot_min_age - ) + snapshot = _get_most_recent_snapshot(current_snapshots, max_snapshot_age_secs=last_snapshot_min_age) # Create a new snapshot if we didn't find an existing one to use if snapshot is None: - volume_tags = boto3_tag_list_to_ansible_dict(volume['Tags']) - volume_name = volume_tags.get('Name') + volume_tags = boto3_tag_list_to_ansible_dict(volume["Tags"]) + volume_name = volume_tags.get("Name") _tags = dict() if volume_name: - _tags['Name'] = volume_name + _tags["Name"] = volume_name if snapshot_tags: _tags.update(snapshot_tags) - params = {'VolumeId': volume_id} + params = {"VolumeId": volume_id} if description: - params['Description'] = description + params["Description"] = description if _tags: - params['TagSpecifications'] = [{ - 'ResourceType': 'snapshot', - 'Tags': ansible_dict_to_boto3_tag_list(_tags), - }] + params["TagSpecifications"] = [ + { + "ResourceType": "snapshot", + "Tags": ansible_dict_to_boto3_tag_list(_tags), + } + ] try: if module.check_mode: - module.exit_json(changed=True, msg='Would have created a snapshot if not in check mode', - volume_id=volume['VolumeId'], volume_size=volume['Size']) + module.exit_json( + changed=True, + msg="Would have created a snapshot if not in check 
mode", + volume_id=volume["VolumeId"], + volume_size=volume["Size"], + ) snapshot = _create_snapshot(ec2, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create snapshot") changed = True if wait: - waiter = get_waiter(ec2, 'snapshot_completed') + waiter = get_waiter(ec2, "snapshot_completed") try: - waiter.wait( - SnapshotIds=[snapshot['SnapshotId']], - WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3) - ) + waiter.wait(SnapshotIds=[snapshot["SnapshotId"]], WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3)) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timed out while creating snapshot') + module.fail_json_aws(e, msg="Timed out while creating snapshot") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg='Error while waiting for snapshot creation' - ) + module.fail_json_aws(e, msg="Error while waiting for snapshot creation") - _tags = boto3_tag_list_to_ansible_dict(snapshot['Tags']) + _tags = boto3_tag_list_to_ansible_dict(snapshot["Tags"]) _snapshot = camel_dict_to_snake_dict(snapshot) - _snapshot['tags'] = _tags + _snapshot["tags"] = _tags results = { - 'snapshot_id': snapshot['SnapshotId'], - 'volume_id': snapshot['VolumeId'], - 'volume_size': snapshot['VolumeSize'], - 'tags': _tags, - 'snapshots': [_snapshot], + "snapshot_id": snapshot["SnapshotId"], + "volume_id": snapshot["VolumeId"], + "volume_size": snapshot["VolumeSize"], + "tags": _tags, + "snapshots": [_snapshot], } module.exit_json(changed=changed, **results) @@ -327,20 +386,126 @@ def delete_snapshot(module, ec2, snapshot_id): if module.check_mode: try: _describe_snapshots(ec2, SnapshotIds=[(snapshot_id)]) - module.exit_json(changed=True, msg='Would have deleted snapshot if not in check mode') - except is_boto3_error_code('InvalidSnapshot.NotFound'): - module.exit_json(changed=False, msg='Invalid snapshot ID - snapshot not found') + module.exit_json(changed=True, msg="Would have deleted snapshot if not in check mode") + except is_boto3_error_code("InvalidSnapshot.NotFound"): + module.exit_json(changed=False, msg="Invalid snapshot ID - snapshot not found") try: ec2.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) - except is_boto3_error_code('InvalidSnapshot.NotFound'): + except is_boto3_error_code("InvalidSnapshot.NotFound"): module.exit_json(changed=False) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete snapshot") # successful delete module.exit_json(changed=True) +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + +def build_modify_createVolumePermission_params(module): + snapshot_id = module.params.get("snapshot_id") + user_ids = module.params.get("user_ids") + group_names = module.params.get("group_names") + + if not user_ids and not group_names: + module.fail_json(msg="Please provide either Group IDs 
or User IDs to modify permissions") + + params = { + "Attribute": "createVolumePermission", + "OperationType": "add", + "SnapshotId": snapshot_id, + "GroupNames": group_names, + "UserIds": user_ids, + } + + # remove empty value params + params = {k: v for k, v in params.items() if v} + + return params + + +def check_user_or_group_update_needed(module, ec2): + existing_create_vol_permission = _describe_snapshot_attribute(module, ec2, module.params.get("snapshot_id")) + purge_permission = module.params.get("purge_create_vol_permission") + supplied_group_names = module.params.get("group_names") + supplied_user_ids = module.params.get("user_ids") + + # if createVolumePermission is already "Public", adding "user_ids" is not needed + if any(item.get("Group") == "all" for item in existing_create_vol_permission) and not purge_permission: + return False + + if supplied_group_names: + existing_group_names = {item.get("Group") for item in existing_create_vol_permission or []} + if set(supplied_group_names) == set(existing_group_names): + return False + else: + return True + + if supplied_user_ids: + existing_user_ids = {item.get("UserId") for item in existing_create_vol_permission or []} + if set(supplied_user_ids) == set(existing_user_ids): + return False + else: + return True + + if purge_permission and existing_create_vol_permission == []: + return False + + return True + + +def _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission): + update_needed = check_user_or_group_update_needed(module, ec2) + + if not update_needed: + module.exit_json(changed=False, msg="Supplied CreateVolumePermission already applied, update not needed") + + if purge_create_vol_permission is True: + _reset_snapshpot_attribute(module, ec2, snapshot_id) + if not module.params.get("user_ids") and not module.params.get("group_names"): + module.exit_json(changed=True, msg="Reset createVolumePermission successfully") + + params = build_modify_createVolumePermission_params(module) + + if module.check_mode: + module.exit_json(changed=True, msg="Would have modified CreateVolumePermission") + + try: + ec2.modify_snapshot_attribute(**params) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to modify createVolumePermission") + + module.exit_json(changed=True, msg="Successfully modified CreateVolumePermission") + + +def _reset_snapshpot_attribute(module, ec2, snapshot_id): + if module.check_mode: + module.exit_json(changed=True, msg="Would have reset CreateVolumePermission") + try: + response = ec2.reset_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to reset createVolumePermission") + + def create_snapshot_ansible_module(): argument_spec = dict( volume_id=dict(), @@ -348,23 +513,29 @@ def create_snapshot_ansible_module(): instance_id=dict(), snapshot_id=dict(), device_name=dict(), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - last_snapshot_min_age=dict(type='int', default=0), - snapshot_tags=dict(type='dict', default=dict()), - state=dict(choices=['absent', 'present'], default='present'), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + last_snapshot_min_age=dict(type="int", default=0), + 
snapshot_tags=dict(type="dict", default=dict()), + state=dict(choices=["absent", "present"], default="present"), + modify_create_vol_permission=dict(type="bool"), + purge_create_vol_permission=dict(type="bool", default=False), + user_ids=dict(type="list", elements="str"), + group_names=dict(type="list", elements="str", choices=["all"]), ) mutually_exclusive = [ - ('instance_id', 'snapshot_id', 'volume_id'), + ("instance_id", "snapshot_id", "volume_id"), + ("group_names", "user_ids"), ] required_if = [ - ('state', 'absent', ('snapshot_id',)), + ("state", "absent", ("snapshot_id",)), + ("purge_create_vol_permission", True, ("modify_create_vol_permission",)), ] required_one_of = [ - ('instance_id', 'snapshot_id', 'volume_id'), + ("instance_id", "snapshot_id", "volume_id"), ] required_together = [ - ('instance_id', 'device_name'), + ("instance_id", "device_name"), ] module = AnsibleAWSModule( @@ -382,26 +553,30 @@ def create_snapshot_ansible_module(): def main(): module = create_snapshot_ansible_module() - volume_id = module.params.get('volume_id') - snapshot_id = module.params.get('snapshot_id') - description = module.params.get('description') - instance_id = module.params.get('instance_id') - device_name = module.params.get('device_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - last_snapshot_min_age = module.params.get('last_snapshot_min_age') - snapshot_tags = module.params.get('snapshot_tags') - state = module.params.get('state') - - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - if state == 'absent': + volume_id = module.params.get("volume_id") + snapshot_id = module.params.get("snapshot_id") + description = module.params.get("description") + instance_id = module.params.get("instance_id") + device_name = module.params.get("device_name") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + last_snapshot_min_age = module.params.get("last_snapshot_min_age") + snapshot_tags = module.params.get("snapshot_tags") + state = module.params.get("state") + modify_create_vol_permission = module.params.get("modify_create_vol_permission") + purge_create_vol_permission = module.params.get("purge_create_vol_permission") + + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + if state == "absent": delete_snapshot( module=module, ec2=ec2, snapshot_id=snapshot_id, ) - else: + elif modify_create_vol_permission is True: + _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission) + elif state == "present": create_snapshot( module=module, description=description, @@ -417,5 +592,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py index 2b7b51158..f2db12cbb 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_snapshot_info version_added: 1.0.0 @@ -70,12 +68,12 @@ notes: the account use the filter 'owner-id'. 
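# Illustrative sketch only, not part of the upstream patch: the note above points
# at the 'owner-id' filter for limiting results to snapshots owned by one account;
# assuming a hypothetical account ID 123456789012, such a query looks like:
#
#   - amazon.aws.ec2_snapshot_info:
#       filters:
#         owner-id: "123456789012"
#     register: owned_snapshots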
extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws + - amazon.aws.region.modules + - amazon.aws.common.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all snapshots, including public ones @@ -110,10 +108,9 @@ EXAMPLES = r''' - amazon.aws.ec2_snapshot_info: filters: status: error +""" -''' - -RETURN = r''' +RETURN = r""" snapshots: description: List of snapshots retrieved with their respective info. type: list @@ -197,99 +194,139 @@ snapshots: type: str returned: always sample: "arn:aws:kms:ap-southeast-2:123456789012:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + create_volume_permissions: + description: + - The users and groups that have the permissions for creating volumes from the snapshot. + - The module will return empty list if the create volume permissions on snapshot are 'private'. + type: list + elements: dict + sample: [{"group": "all"}] next_token_id: description: - Contains the value returned from a previous paginated request where C(max_results) was used and the results exceeded the value of that parameter. - This value is null when there are no more results to return. type: str returned: when option C(max_results) is set in input -''' +""" try: + from botocore.exceptions import BotoCoreError from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +def build_request_args(snapshot_ids, owner_ids, restorable_by_user_ids, filters, max_results, next_token_id): + request_args = { + "Filters": ansible_dict_to_boto3_filter_list(filters), + "MaxResults": max_results, + "NextToken": next_token_id, + "OwnerIds": owner_ids, + "RestorableByUserIds": [str(user_id) for user_id in restorable_by_user_ids], + "SnapshotIds": snapshot_ids, + } -def list_ec2_snapshots(connection, module): + request_args = {k: v for k, v in request_args.items() if v} - snapshot_ids = module.params.get("snapshot_ids") - owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")] - restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")] - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - max_results = module.params.get('max_results') - next_token = module.params.get('next_token_id') - optional_param = {} - if max_results: - optional_param['MaxResults'] = max_results - if next_token: - optional_param['NextToken'] = next_token + 
return request_args + +def get_snapshots(connection, module, request_args): + snapshot_ids = request_args.get("SnapshotIds") try: - snapshots = connection.describe_snapshots( - aws_retry=True, - SnapshotIds=snapshot_ids, OwnerIds=owner_ids, - RestorableByUserIds=restorable_by_user_ids, Filters=filters, - **optional_param) - except is_boto3_error_code('InvalidSnapshot.NotFound') as e: + snapshots = connection.describe_snapshots(aws_retry=True, **request_args) + except is_boto3_error_code("InvalidSnapshot.NotFound") as e: if len(snapshot_ids) > 1: - module.warn("Some of your snapshots may exist, but %s" % str(e)) - snapshots = {'Snapshots': []} + module.warn(f"Some of your snapshots may exist, but {str(e)}") + snapshots = {"Snapshots": []} + + return snapshots + + +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + +def list_ec2_snapshots(connection, module, request_args): + try: + snapshots = get_snapshots(connection, module, request_args) except ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe snapshots') + module.fail_json_aws(e, msg="Failed to describe snapshots") result = {} + + # Add createVolumePermission info to snapshots result + for snapshot in snapshots["Snapshots"]: + snapshot_id = snapshot.get("SnapshotId") + create_vol_permission = _describe_snapshot_attribute(module, connection, snapshot_id) + snapshot["CreateVolumePermissions"] = create_vol_permission + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_snapshots = [] - for snapshot in snapshots['Snapshots']: + for snapshot in snapshots["Snapshots"]: snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) # Turn the boto3 result in to ansible friendly tag dictionary for snapshot in snaked_snapshots: - if 'tags' in snapshot: - snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value') + if "tags" in snapshot: + snapshot["tags"] = boto3_tag_list_to_ansible_dict(snapshot["tags"], "key", "value") - result['snapshots'] = snaked_snapshots + result["snapshots"] = snaked_snapshots - if snapshots.get('NextToken'): - result.update(camel_dict_to_snake_dict({'NextTokenId': snapshots.get('NextToken')})) + if snapshots.get("NextToken"): + result.update(camel_dict_to_snake_dict({"NextTokenId": snapshots.get("NextToken")})) - module.exit_json(**result) + return result def main(): - argument_spec = dict( - snapshot_ids=dict(default=[], type='list', elements='str'), - owner_ids=dict(default=[], type='list', elements='str'), - restorable_by_user_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict'), - max_results=dict(type='int'), - next_token_id=dict(type='str') + filters=dict(default={}, type="dict"), + max_results=dict(type="int"), + next_token_id=dict(type="str"), + owner_ids=dict(default=[], type="list", elements="str"), + restorable_by_user_ids=dict(default=[], type="list", elements="str"), + snapshot_ids=dict(default=[], type="list", elements="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'], - ['snapshot_ids', 'max_results'], - ['snapshot_ids', 'next_token_id'] 
+ ["snapshot_ids", "owner_ids", "restorable_by_user_ids", "filters"], + ["snapshot_ids", "max_results"], + ["snapshot_ids", "next_token_id"], ], - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + filters=module.params["filters"], + max_results=module.params["max_results"], + next_token_id=module.params["next_token_id"], + owner_ids=module.params["owner_ids"], + restorable_by_user_ids=module.params["restorable_by_user_ids"], + snapshot_ids=module.params["snapshot_ids"], + ) - list_ec2_snapshots(connection, module) + result = list_ec2_snapshots(connection, module, request_args) + + module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py index a5d8f2ca8..1bd564724 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_spot_instance version_added: 2.0.0 @@ -290,12 +288,12 @@ options: type: bool version_added: 5.4.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
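# Illustrative sketch only, not part of the upstream patch: cancelling spot
# requests and also terminating the instances they launched, via the
# terminate_instances option this patch validates in main() (it requires
# state=absent); the request ID below is a placeholder.
#
#   - name: Cancel spot requests and terminate their instances
#     amazon.aws.ec2_spot_instance:
#       spot_instance_request_ids:
#         - sir-12345678
#       state: absent
#       terminate_instances: true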
- name: Simple Spot Request Creation @@ -315,22 +313,22 @@ EXAMPLES = ''' block_device_mappings: - device_name: /dev/sdb ebs: - delete_on_termination: True + delete_on_termination: true volume_type: gp3 volume_size: 5 - device_name: /dev/sdc ebs: - delete_on_termination: True + delete_on_termination: true volume_type: io2 volume_size: 30 network_interfaces: - - associate_public_ip_address: False - delete_on_termination: True + - associate_public_ip_address: false + delete_on_termination: true device_index: 0 placement: availability_zone: us-west-2a monitoring: - enabled: False + enabled: false spot_price: 0.002 tags: Environment: Testing @@ -339,9 +337,9 @@ EXAMPLES = ''' amazon.aws.ec2_spot_instance: spot_instance_request_ids: ['sir-12345678', 'sir-abcdefgh'] state: absent -''' +""" -RETURN = ''' +RETURN = r""" spot_request: description: The spot instance request details after creation returned: when success @@ -405,7 +403,8 @@ cancelled_spot_request: returned: always type: str sample: 'Spot requests with IDs: sir-1234abcd have been cancelled' -''' +""" + # TODO: add support for datetime-based parameters # import datetime # import time @@ -414,13 +413,14 @@ try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def build_launch_specification(launch_spec): @@ -435,29 +435,27 @@ def build_launch_specification(launch_spec): """ assigned_keys = dict((k, v) for k, v in launch_spec.items() if v is not None) - sub_key_to_build = ['placement', 'iam_instance_profile', 'monitoring'] + sub_key_to_build = ["placement", "iam_instance_profile", "monitoring"] for subkey in sub_key_to_build: if launch_spec[subkey] is not None: assigned_keys[subkey] = dict((k, v) for k, v in launch_spec[subkey].items() if v is not None) - if launch_spec['network_interfaces'] is not None: + if launch_spec["network_interfaces"] is not None: interfaces = [] - for iface in launch_spec['network_interfaces']: + for iface in launch_spec["network_interfaces"]: interfaces.append(dict((k, v) for k, v in iface.items() if v is not None)) - assigned_keys['network_interfaces'] = interfaces + assigned_keys["network_interfaces"] = interfaces - if launch_spec['block_device_mappings'] is not None: + if launch_spec["block_device_mappings"] is not None: block_devs = [] - for dev in launch_spec['block_device_mappings']: - block_devs.append( - dict((k, v) 
for k, v in dev.items() if v is not None)) - assigned_keys['block_device_mappings'] = block_devs + for dev in launch_spec["block_device_mappings"]: + block_devs.append(dict((k, v) for k, v in dev.items() if v is not None)) + assigned_keys["block_device_mappings"] = block_devs return snake_dict_to_camel_dict(assigned_keys, capitalize_first=True) def request_spot_instances(module, connection): - # connection.request_spot_instances() always creates a new spot request changed = True @@ -466,83 +464,95 @@ def request_spot_instances(module, connection): params = {} - if module.params.get('launch_specification'): - params['LaunchSpecification'] = build_launch_specification(module.params.get('launch_specification')) + if module.params.get("launch_specification"): + params["LaunchSpecification"] = build_launch_specification(module.params.get("launch_specification")) - if module.params.get('zone_group'): - params['AvailabilityZoneGroup'] = module.params.get('zone_group') + if module.params.get("zone_group"): + params["AvailabilityZoneGroup"] = module.params.get("zone_group") - if module.params.get('count'): - params['InstanceCount'] = module.params.get('count') + if module.params.get("count"): + params["InstanceCount"] = module.params.get("count") - if module.params.get('launch_group'): - params['LaunchGroup'] = module.params.get('launch_group') + if module.params.get("launch_group"): + params["LaunchGroup"] = module.params.get("launch_group") - if module.params.get('spot_price'): - params['SpotPrice'] = module.params.get('spot_price') + if module.params.get("spot_price"): + params["SpotPrice"] = module.params.get("spot_price") - if module.params.get('spot_type'): - params['Type'] = module.params.get('spot_type') + if module.params.get("spot_type"): + params["Type"] = module.params.get("spot_type") - if module.params.get('client_token'): - params['ClientToken'] = module.params.get('client_token') + if module.params.get("client_token"): + params["ClientToken"] = module.params.get("client_token") - if module.params.get('interruption'): - params['InstanceInterruptionBehavior'] = module.params.get('interruption') + if module.params.get("interruption"): + params["InstanceInterruptionBehavior"] = module.params.get("interruption") - if module.params.get('tags'): - params['TagSpecifications'] = [{ - 'ResourceType': 'spot-instances-request', - 'Tags': ansible_dict_to_boto3_tag_list(module.params.get('tags')), - }] + if module.params.get("tags"): + params["TagSpecifications"] = [ + { + "ResourceType": "spot-instances-request", + "Tags": ansible_dict_to_boto3_tag_list(module.params.get("tags")), + } + ] # TODO: add support for datetime-based parameters # params['ValidFrom'] = module.params.get('valid_from') # params['ValidUntil'] = module.params.get('valid_until') try: - request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))['SpotInstanceRequests'][0] + request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))[ + "SpotInstanceRequests" + ][0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while creating the spot instance request') + module.fail_json_aws(e, msg="Error while creating the spot instance request") - request_spot_instance_response['Tags'] = boto3_tag_list_to_ansible_dict(request_spot_instance_response.get('Tags', [])) - spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=['Tags']) + request_spot_instance_response["Tags"] 
= boto3_tag_list_to_ansible_dict( + request_spot_instance_response.get("Tags", []) + ) + spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=["Tags"]) module.exit_json(spot_request=spot_request, changed=changed) def cancel_spot_instance_requests(module, connection): - changed = False - spot_instance_request_ids = module.params.get('spot_instance_request_ids') + spot_instance_request_ids = module.params.get("spot_instance_request_ids") requests_exist = dict() try: - paginator = connection.get_paginator('describe_spot_instance_requests').paginate(SpotInstanceRequestIds=spot_instance_request_ids, - Filters=[{'Name': 'state', 'Values': ['open', 'active']}]) + paginator = connection.get_paginator("describe_spot_instance_requests").paginate( + SpotInstanceRequestIds=spot_instance_request_ids, Filters=[{"Name": "state", "Values": ["open", "active"]}] + ) jittered_retry = AWSRetry.jittered_backoff() requests_exist = jittered_retry(paginator.build_full_result)() - except is_boto3_error_code('InvalidSpotInstanceRequestID.NotFound'): - requests_exist['SpotInstanceRequests'] = [] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidSpotInstanceRequestID.NotFound"): + requests_exist["SpotInstanceRequests"] = [] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failure when describing spot requests") try: - if len(requests_exist['SpotInstanceRequests']) > 0: + if len(requests_exist["SpotInstanceRequests"]) > 0: changed = True if module.check_mode: - module.exit_json(changed=changed, - msg='Would have cancelled Spot request {0}'.format(spot_instance_request_ids)) + module.exit_json(changed=changed, msg=f"Would have cancelled Spot request {spot_instance_request_ids}") - connection.cancel_spot_instance_requests(aws_retry=True, SpotInstanceRequestIds=module.params.get('spot_instance_request_ids')) + connection.cancel_spot_instance_requests( + aws_retry=True, SpotInstanceRequestIds=module.params.get("spot_instance_request_ids") + ) if module.params.get("terminate_instances") is True: associated_instances = [request["InstanceId"] for request in requests_exist["SpotInstanceRequests"]] terminate_associated_instances(connection, module, associated_instances) - module.exit_json(changed=changed, msg='Cancelled Spot request {0}'.format(module.params.get('spot_instance_request_ids'))) + module.exit_json( + changed=changed, msg=f"Cancelled Spot request {module.params.get('spot_instance_request_ids')}" + ) else: - module.exit_json(changed=changed, msg='Spot request not found or already cancelled') + module.exit_json(changed=changed, msg="Spot request not found or already cancelled") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while cancelling the spot instance request') + module.fail_json_aws(e, msg="Error while cancelling the spot instance request") def terminate_associated_instances(connection, module, instance_ids): @@ -554,97 +564,89 @@ def terminate_associated_instances(connection, module, instance_ids): def main(): network_interface_options = dict( - associate_public_ip_address=dict(type='bool'), - delete_on_termination=dict(type='bool'), - description=dict(type='str'), - device_index=dict(type='int'), - groups=dict(type='list', elements='str'), - ipv6_address_count=dict(type='int'), - 
ipv6_addresses=dict(type='list', elements='dict', options=dict(ipv6address=dict(type='str'))), - network_interface_id=dict(type='str'), - private_ip_address=dict(type='str'), - private_ip_addresses=dict(type='list', elements='dict'), - secondary_private_ip_address_count=dict(type='int'), - subnet_id=dict(type='str'), - associate_carrier_ip_address=dict(type='bool'), - interface_type=dict(type='str', choices=['interface', 'efa']), - network_card_index=dict(type='int'), - ipv4_prefixes=dict(type='list', elements='dict'), - ipv4_prefix_count=dict(type='int'), - ipv6_prefixes=dict(type='list', elements='dict'), - ipv6_prefix_count=dict(type='int') + associate_public_ip_address=dict(type="bool"), + delete_on_termination=dict(type="bool"), + description=dict(type="str"), + device_index=dict(type="int"), + groups=dict(type="list", elements="str"), + ipv6_address_count=dict(type="int"), + ipv6_addresses=dict(type="list", elements="dict", options=dict(ipv6address=dict(type="str"))), + network_interface_id=dict(type="str"), + private_ip_address=dict(type="str"), + private_ip_addresses=dict(type="list", elements="dict"), + secondary_private_ip_address_count=dict(type="int"), + subnet_id=dict(type="str"), + associate_carrier_ip_address=dict(type="bool"), + interface_type=dict(type="str", choices=["interface", "efa"]), + network_card_index=dict(type="int"), + ipv4_prefixes=dict(type="list", elements="dict"), + ipv4_prefix_count=dict(type="int"), + ipv6_prefixes=dict(type="list", elements="dict"), + ipv6_prefix_count=dict(type="int"), ) block_device_mappings_options = dict( - device_name=dict(type='str'), - virtual_name=dict(type='str'), - ebs=dict(type='dict'), - no_device=dict(type='str'), - ) - monitoring_options = dict( - enabled=dict(type='bool', default=False) + device_name=dict(type="str"), + virtual_name=dict(type="str"), + ebs=dict(type="dict"), + no_device=dict(type="str"), ) + monitoring_options = dict(enabled=dict(type="bool", default=False)) placement_options = dict( - availability_zone=dict(type='str'), - group_name=dict(type='str'), - tenancy=dict(type='str', choices=['default', 'dedicated', 'host'], default='default') - ) - iam_instance_profile_options = dict( - arn=dict(type='str'), - name=dict(type='str') + availability_zone=dict(type="str"), + group_name=dict(type="str"), + tenancy=dict(type="str", choices=["default", "dedicated", "host"], default="default"), ) + iam_instance_profile_options = dict(arn=dict(type="str"), name=dict(type="str")) launch_specification_options = dict( - security_group_ids=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - block_device_mappings=dict(type='list', elements='dict', options=block_device_mappings_options), - ebs_optimized=dict(type='bool', default=False), - iam_instance_profile=dict(type='dict', options=iam_instance_profile_options), - image_id=dict(type='str'), - instance_type=dict(type='str'), - kernel_id=dict(type='str'), - key_name=dict(type='str'), - monitoring=dict(type='dict', options=monitoring_options), - network_interfaces=dict(type='list', elements='dict', options=network_interface_options, default=[]), - placement=dict(type='dict', options=placement_options), - ramdisk_id=dict(type='str'), - user_data=dict(type='str'), - subnet_id=dict(type='str') + security_group_ids=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + block_device_mappings=dict(type="list", elements="dict", options=block_device_mappings_options), + ebs_optimized=dict(type="bool", 
default=False), + iam_instance_profile=dict(type="dict", options=iam_instance_profile_options), + image_id=dict(type="str"), + instance_type=dict(type="str"), + kernel_id=dict(type="str"), + key_name=dict(type="str"), + monitoring=dict(type="dict", options=monitoring_options), + network_interfaces=dict(type="list", elements="dict", options=network_interface_options, default=[]), + placement=dict(type="dict", options=placement_options), + ramdisk_id=dict(type="str"), + user_data=dict(type="str"), + subnet_id=dict(type="str"), ) argument_spec = dict( - zone_group=dict(type='str'), - client_token=dict(type='str', no_log=False), - count=dict(type='int', default=1), - interruption=dict(type='str', default="terminate", choices=['hibernate', 'stop', 'terminate']), - launch_group=dict(type='str'), - launch_specification=dict(type='dict', options=launch_specification_options), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='str'), - spot_type=dict(default='one-time', choices=["one-time", "persistent"]), - tags=dict(type='dict'), + zone_group=dict(type="str"), + client_token=dict(type="str", no_log=False), + count=dict(type="int", default=1), + interruption=dict(type="str", default="terminate", choices=["hibernate", "stop", "terminate"]), + launch_group=dict(type="str"), + launch_specification=dict(type="dict", options=launch_specification_options), + state=dict(default="present", choices=["present", "absent"]), + spot_price=dict(type="str"), + spot_type=dict(default="one-time", choices=["one-time", "persistent"]), + tags=dict(type="dict"), # valid_from=dict(type='datetime', default=datetime.datetime.now()), # valid_until=dict(type='datetime', default=(datetime.datetime.now() + datetime.timedelta(minutes=60)) spot_instance_request_ids=dict(type="list", elements="str"), terminate_instances=dict(type="bool", default="False"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) state = module.params["state"] if module.params.get("terminate_instances") and state != "absent": module.fail_json("terminate_instances can only be used when state is absent.") - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - if state == 'present': + if state == "present": request_spot_instances(module, connection) - if state == 'absent': + if state == "absent": cancel_spot_instance_requests(module, connection) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py index 599db778b..7dc4abce8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_spot_instance_info version_added: 2.0.0 @@ -33,12 +31,12 @@ 
options: default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: describe the Spot Instance requests based on request IDs @@ -53,17 +51,16 @@ EXAMPLES = ''' - sir-13579246 - sir-87654321 filters: - launch.instance-type: t3.medium + launch.instance-type: t3.medium - name: describe the Spot requests filtered using multiple filters amazon.aws.ec2_spot_instance_info: filters: - state: active - launch.block-device-mapping.device-name: /dev/sdb + state: active + launch.block-device-mapping.device-name: /dev/sdb +""" -''' - -RETURN = ''' +RETURN = r""" spot_request: description: The gathered information about specified spot instance requests. returned: when success @@ -237,65 +234,62 @@ spot_request: "type": "one-time", "valid_until": "2021-09-08T21:05:57+00:00" } -''' - +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def _describe_spot_instance_requests(connection, **params): - paginator = connection.get_paginator('describe_spot_instance_requests') + paginator = connection.get_paginator("describe_spot_instance_requests") return paginator.paginate(**params).build_full_result() def describe_spot_instance_requests(connection, module): - params = {} - if module.params.get('filters'): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('spot_instance_request_ids'): - params['SpotInstanceRequestIds'] = module.params.get('spot_instance_request_ids') + if module.params.get("filters"): + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("spot_instance_request_ids"): + params["SpotInstanceRequestIds"] = module.params.get("spot_instance_request_ids") try: - describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)['SpotInstanceRequests'] + describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)[ + "SpotInstanceRequests" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe spot instance requests') + module.fail_json_aws(e, msg="Failed to describe spot instance requests") spot_request = [] for response_list_item in describe_spot_instance_requests_response: spot_request.append(camel_dict_to_snake_dict(response_list_item)) if len(spot_request) == 0: - module.exit_json(msg='No spot requests found for specified options') + module.exit_json(msg="No spot requests found for specified options") module.exit_json(spot_request=spot_request) def main(): - argument_spec = dict( - filters=dict(default={}, type='dict'), - 
spot_instance_request_ids=dict(default=[], type='list', elements='str'), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + filters=dict(default={}, type="dict"), + spot_instance_request_ids=dict(default=[], type="list", elements="str"), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") describe_spot_instance_requests(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py index 6ccf687e3..9773325c7 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_tag version_added: 1.0.0 @@ -48,12 +46,12 @@ author: - Lester Wade (@lwade) - Paul Arthur (@flowerysong) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure tags are present on a resource amazon.aws.ec2_tag: region: eu-west-1 @@ -65,7 +63,7 @@ EXAMPLES = ''' - name: Ensure all volumes are tagged amazon.aws.ec2_tag: - region: eu-west-1 + region: eu-west-1 resource: '{{ item.id }}' state: present tags: @@ -94,12 +92,12 @@ EXAMPLES = ''' region: eu-west-1 resource: i-xxxxxxxxxxxxxxxxx tags: - Name: '' + Name: '' state: absent purge_tags: true -''' +""" -RETURN = ''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -112,56 +110,56 @@ removed_tags: description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import remove_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def main(): argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + tags=dict(type="dict", required=True), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = 
module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - ec2 = module.client('ec2') + ec2 = module.client("ec2") current_tags = describe_ec2_tags(ec2, module, resource) - if state == 'absent': + if state == "absent": removed_tags = {} for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): - result['changed'] = True + result["changed"] = True removed_tags[key] = current_tags[key] - result['removed_tags'] = removed_tags + result["removed_tags"] = removed_tags remove_ec2_tags(ec2, module, resource, removed_tags.keys()) - if state == 'present': + if state == "present": tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_unset: - result['removed_tags'] = {} + result["removed_tags"] = {} for key in tags_to_unset: - result['removed_tags'][key] = current_tags[key] - result['added_tags'] = tags_to_set - result['changed'] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags) + result["removed_tags"][key] = current_tags[key] + result["added_tags"] = tags_to_set + result["changed"] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags) - result['tags'] = describe_ec2_tags(ec2, module, resource) + result["tags"] = describe_ec2_tags(ec2, module, resource) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py index 6be536562..1efcd5582 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_tag_info version_added: 1.0.0 @@ -25,12 +23,12 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Retrieve all tags on an instance amazon.aws.ec2_tag_info: region: eu-west-1 @@ -42,17 +40,17 @@ EXAMPLES = ''' region: eu-west-1 resource: vpc-xxxxxxxxxxxxxxxxx register: vpc_tags -''' +""" -RETURN = ''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always type: dict -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def main(): @@ -61,13 +59,13 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params['resource'] - ec2 = module.client('ec2') + resource = module.params["resource"] + ec2 = module.client("ec2") current_tags = describe_ec2_tags(ec2, module, resource) module.exit_json(changed=False, 
tags=current_tags) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py index 8afbc6e53..6fa2ca47b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vol version_added: 1.0.0 @@ -112,13 +109,13 @@ author: notes: - Support for I(purge_tags) was added in release 1.5.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Simple attachment action - amazon.aws.ec2_vol: instance: XXXXXX @@ -204,9 +201,9 @@ EXAMPLES = ''' id: XXXXXX device_name: /dev/sdf delete_on_termination: true -''' +""" -RETURN = ''' +RETURN = r""" device: description: device name of attached volume returned: when success @@ -247,21 +244,21 @@ volume: "type": "standard", "zone": "us-east-1b" } -''' +""" import time +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications - +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list try: import botocore @@ -276,17 +273,17 @@ def get_instance(module, ec2_conn, instance_id=None): try: reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id]) - instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0]) + instance = camel_dict_to_snake_dict(reservation_response["Reservations"][0]["Instances"][0]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while getting instance_id with id 
{0}'.format(instance)) + module.fail_json_aws(e, msg=f"Error while getting instance_id with id {instance}") return instance def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): - name = module.params.get('name') - param_id = module.params.get('id') - zone = module.params.get('zone') + name = module.params.get("name") + param_id = module.params.get("id") + zone = module.params.get("zone") if not vol_id: vol_id = param_id @@ -299,52 +296,52 @@ def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): vols = [] if vol_id: - find_params['VolumeIds'] = [vol_id] + find_params["VolumeIds"] = [vol_id] elif name: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"tag:Name": name}) elif zone: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"availability-zone": zone}) try: - paginator = ec2_conn.get_paginator('describe_volumes') + paginator = ec2_conn.get_paginator("describe_volumes") vols_response = paginator.paginate(**find_params) - vols = list(vols_response)[0].get('Volumes') + vols = list(vols_response)[0].get("Volumes") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if is_boto3_error_code('InvalidVolume.NotFound'): - module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False) - module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params)) + if is_boto3_error_code("InvalidVolume.NotFound"): + module.exit_json(msg=f"Volume {vol_id} does not exist", changed=False) + module.fail_json_aws(e, msg=f"Error while getting EBS volumes with the parameters {find_params}") if not vols: if fail_on_not_found and vol_id: - msg = "Could not find volume with id: {0}".format(vol_id) + msg = f"Could not find volume with id: {vol_id}" if name: - msg += (" and name: {0}".format(name)) + msg += f" and name: {name}" module.fail_json(msg=msg) else: return None if len(vols) > 1: module.fail_json( - msg="Found more than one volume in zone (if specified) with name: {0}".format(name), - found=[v['VolumeId'] for v in vols] + msg=f"Found more than one volume in zone (if specified) with name: {name}", + found=[v["VolumeId"] for v in vols], ) vol = camel_dict_to_snake_dict(vols[0]) return vol def get_volumes(module, ec2_conn): - instance = module.params.get('instance') + instance = module.params.get("instance") find_params = dict() if instance: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"attachment.instance-id": instance}) vols = [] try: vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params) - vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])] + vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get("Volumes", [])] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while getting EBS volumes') + module.fail_json_aws(e, msg="Error while getting EBS volumes") return vols @@ -354,170 +351,166 @@ def delete_volume(module, ec2_conn, volume_id=None): try: ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id) changed = True - except is_boto3_error_code('InvalidVolume.NotFound'): + except is_boto3_error_code("InvalidVolume.NotFound"): 
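# (Editor's note, not part of the patch.) The bare "except is_boto3_error_code(...):"
# idiom above works because the helper returns an exception *class*: called without
# an exception argument it inspects the in-flight exception (roughly via
# sys.exc_info()) and returns botocore's ClientError when the error code matches,
# or a freshly minted dummy exception type, which never matches, otherwise. That is
# also why ClientError/BotoCoreError are still caught explicitly afterwards, under a
# duplicate-except pylint pragma.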
module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error while deleting volume') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error while deleting volume") return changed def update_volume(module, ec2_conn, volume): changed = False - req_obj = {'VolumeId': volume['volume_id']} + req_obj = {"VolumeId": volume["volume_id"]} - if module.params.get('modify_volume'): - target_type = module.params.get('volume_type') + if module.params.get("modify_volume"): + target_type = module.params.get("volume_type") original_type = None type_changed = False if target_type: - original_type = volume['volume_type'] + original_type = volume["volume_type"] if target_type != original_type: type_changed = True - req_obj['VolumeType'] = target_type + req_obj["VolumeType"] = target_type iops_changed = False - target_iops = module.params.get('iops') - original_iops = volume.get('iops') + target_iops = module.params.get("iops") + original_iops = volume.get("iops") if target_iops: if target_iops != original_iops: iops_changed = True - req_obj['Iops'] = target_iops + req_obj["Iops"] = target_iops else: - req_obj['Iops'] = original_iops + req_obj["Iops"] = original_iops else: # If no IOPS value is specified and there was a volume_type update to gp3, # the existing value is retained, unless a volume type is modified that supports different values, # otherwise, the default iops value is applied. - if type_changed and target_type == 'gp3': - if ( - (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops - ): - req_obj['Iops'] = 3000 + if type_changed and target_type == "gp3": + if (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops: + req_obj["Iops"] = 3000 iops_changed = True - target_size = module.params.get('volume_size') + target_size = module.params.get("volume_size") size_changed = False if target_size: - original_size = volume['size'] + original_size = volume["size"] if target_size != original_size: size_changed = True - req_obj['Size'] = target_size + req_obj["Size"] = target_size - target_type = module.params.get('volume_type') + target_type = module.params.get("volume_type") original_type = None type_changed = False if target_type: - original_type = volume['volume_type'] + original_type = volume["volume_type"] if target_type != original_type: type_changed = True - req_obj['VolumeType'] = target_type + req_obj["VolumeType"] = target_type - target_throughput = module.params.get('throughput') + target_throughput = module.params.get("throughput") throughput_changed = False if target_throughput: - original_throughput = volume.get('throughput') + original_throughput = volume.get("throughput") if target_throughput != original_throughput: throughput_changed = True - req_obj['Throughput'] = target_throughput + req_obj["Throughput"] = target_throughput - target_multi_attach = module.params.get('multi_attach') + target_multi_attach = module.params.get("multi_attach") multi_attach_changed = False if target_multi_attach is not None: - original_multi_attach = volume['multi_attach_enabled'] + original_multi_attach = volume["multi_attach_enabled"] if target_multi_attach != original_multi_attach: multi_attach_changed = True - req_obj['MultiAttachEnabled'] = target_multi_attach + 
req_obj["MultiAttachEnabled"] = target_multi_attach changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed if changed: if module.check_mode: - module.exit_json(changed=True, msg='Would have updated volume if not in check mode.') + module.exit_json(changed=True, msg="Would have updated volume if not in check mode.") response = ec2_conn.modify_volume(**req_obj) - volume['size'] = response.get('VolumeModification').get('TargetSize') - volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType') - volume['iops'] = response.get('VolumeModification').get('TargetIops') - volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled') - volume['throughput'] = response.get('VolumeModification').get('TargetThroughput') + volume["size"] = response.get("VolumeModification").get("TargetSize") + volume["volume_type"] = response.get("VolumeModification").get("TargetVolumeType") + volume["iops"] = response.get("VolumeModification").get("TargetIops") + volume["multi_attach_enabled"] = response.get("VolumeModification").get("TargetMultiAttachEnabled") + volume["throughput"] = response.get("VolumeModification").get("TargetThroughput") return volume, changed def create_volume(module, ec2_conn, zone): changed = False - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - kms_key_id = module.params.get('kms_key_id') - volume_size = module.params.get('volume_size') - volume_type = module.params.get('volume_type') - snapshot = module.params.get('snapshot') - throughput = module.params.get('throughput') - multi_attach = module.params.get('multi_attach') - outpost_arn = module.params.get('outpost_arn') - tags = module.params.get('tags') or {} - name = module.params.get('name') + iops = module.params.get("iops") + encrypted = module.params.get("encrypted") + kms_key_id = module.params.get("kms_key_id") + volume_size = module.params.get("volume_size") + volume_type = module.params.get("volume_type") + snapshot = module.params.get("snapshot") + throughput = module.params.get("throughput") + multi_attach = module.params.get("multi_attach") + outpost_arn = module.params.get("outpost_arn") + tags = module.params.get("tags") or {} + name = module.params.get("name") volume = get_volume(module, ec2_conn) if module.check_mode: - module.exit_json(changed=True, msg='Would have created a volume if not in check mode.') + module.exit_json(changed=True, msg="Would have created a volume if not in check mode.") if volume is None: - try: changed = True additional_params = dict() if volume_size: - additional_params['Size'] = int(volume_size) + additional_params["Size"] = int(volume_size) if kms_key_id: - additional_params['KmsKeyId'] = kms_key_id + additional_params["KmsKeyId"] = kms_key_id if snapshot: - additional_params['SnapshotId'] = snapshot + additional_params["SnapshotId"] = snapshot if iops: - additional_params['Iops'] = int(iops) + additional_params["Iops"] = int(iops) # Use the default value if any iops has been specified when volume_type=gp3 - if volume_type == 'gp3' and not iops: - additional_params['Iops'] = 3000 + if volume_type == "gp3" and not iops: + additional_params["Iops"] = 3000 if throughput: - additional_params['Throughput'] = int(throughput) + additional_params["Throughput"] = int(throughput) if multi_attach: - additional_params['MultiAttachEnabled'] = True + additional_params["MultiAttachEnabled"] = True if outpost_arn: if is_outpost_arn(outpost_arn): - 
additional_params['OutpostArn'] = outpost_arn + additional_params["OutpostArn"] = outpost_arn else: - module.fail_json('OutpostArn does not match the pattern specified in API specifications.') + module.fail_json("OutpostArn does not match the pattern specified in API specifications.") if name: - tags['Name'] = name + tags["Name"] = name if tags: - additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume']) + additional_params["TagSpecifications"] = boto3_tag_specifications(tags, types=["volume"]) create_vol_response = ec2_conn.create_volume( - aws_retry=True, - AvailabilityZone=zone, - Encrypted=encrypted, - VolumeType=volume_type, - **additional_params + aws_retry=True, AvailabilityZone=zone, Encrypted=encrypted, VolumeType=volume_type, **additional_params ) - waiter = ec2_conn.get_waiter('volume_available') + waiter = ec2_conn.get_waiter("volume_available") waiter.wait( - VolumeIds=[create_vol_response['VolumeId']], + VolumeIds=[create_vol_response["VolumeId"]], ) - volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId']) + volume = get_volume(module, ec2_conn, vol_id=create_vol_response["VolumeId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while creating EBS volume') + module.fail_json_aws(e, msg="Error while creating EBS volume") return volume, changed @@ -531,45 +524,52 @@ def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name): # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;) - attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + attachment_data = get_attachment_data(volume_dict, wanted_state="attached") if attachment_data: if module.check_mode: - if attachment_data[0].get('status') in ['attached', 'attaching']: - module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format( - attachment_data[0].get('instance_id', None))) - if not volume_dict['multi_attach_enabled']: + if attachment_data[0].get("status") in ["attached", "attaching"]: + instance_id = attachment_data[0].get("instance_id", "None") + module.exit_json( + changed=False, msg=f"IN CHECK MODE - volume already attached to instance: {instance_id}." + ) + if not volume_dict["multi_attach_enabled"]: # volumes without MultiAttach Enabled can be attached to 1 instance only - if attachment_data[0].get('instance_id', None) != instance_dict['instance_id']: - module.fail_json(msg="Volume {0} is already attached to another instance: {1}." - .format(volume_dict['volume_id'], attachment_data[0].get('instance_id', None))) + if attachment_data[0].get("instance_id", None) != instance_dict["instance_id"]: + instance_id = attachment_data[0].get("instance_id", "None") + module.fail_json( + msg=f"Volume {volume_dict['volume_id']} is already attached to another instance: {instance_id}." 
+ ) else: return volume_dict, changed try: if module.check_mode: - module.exit_json(changed=True, msg='Would have attached volume if not in check mode.') - attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name, - InstanceId=instance_dict['instance_id'], - VolumeId=volume_dict['volume_id']) + module.exit_json(changed=True, msg="Would have attached volume if not in check mode.") + attach_response = ec2_conn.attach_volume( + aws_retry=True, + Device=device_name, + InstanceId=instance_dict["instance_id"], + VolumeId=volume_dict["volume_id"], + ) - waiter = ec2_conn.get_waiter('volume_in_use') - waiter.wait(VolumeIds=[attach_response['VolumeId']]) + waiter = ec2_conn.get_waiter("volume_in_use") + waiter.wait(VolumeIds=[attach_response["VolumeId"]]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while attaching EBS volume') + module.fail_json_aws(e, msg="Error while attaching EBS volume") modify_dot_attribute(module, ec2_conn, instance_dict, device_name) - volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + volume = get_volume(module, ec2_conn, vol_id=volume_dict["volume_id"]) return volume, changed def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): - """ Modify delete_on_termination attribute """ + """Modify delete_on_termination attribute""" - delete_on_termination = module.params.get('delete_on_termination') + delete_on_termination = module.params.get("delete_on_termination") changed = False # volume_in_use can return *shortly* before it appears on the instance @@ -578,30 +578,27 @@ def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): _attempt = 0 while mapped_block_device is None: _attempt += 1 - instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id']) + instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict["instance_id"]) mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name) if mapped_block_device is None: if _attempt > 2: - module.fail_json(msg='Unable to find device on instance', - device=device_name, instance=instance_dict) + module.fail_json(msg="Unable to find device on instance", device=device_name, instance=instance_dict) time.sleep(1) - if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'): + if delete_on_termination != mapped_block_device["ebs"].get("delete_on_termination"): try: ec2_conn.modify_instance_attribute( aws_retry=True, - InstanceId=instance_dict['instance_id'], - BlockDeviceMappings=[{ - "DeviceName": device_name, - "Ebs": { - "DeleteOnTermination": delete_on_termination - } - }] + InstanceId=instance_dict["instance_id"], + BlockDeviceMappings=[ + {"DeviceName": device_name, "Ebs": {"DeleteOnTermination": delete_on_termination}} + ], ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, - msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id'])) + module.fail_json_aws( + e, msg=f"Error while modifying Block Device Mapping of instance {instance_dict['instance_id']}" + ) return changed @@ -610,19 +607,21 @@ def get_attachment_data(volume_dict, wanted_state=None): attachment_data = [] if not volume_dict: return attachment_data - resource = volume_dict.get('attachments', []) + resource = volume_dict.get("attachments", []) if wanted_state: # filter 
'state', return attachment matching wanted state - resource = [data for data in resource if data['state'] == wanted_state] + resource = [data for data in resource if data["state"] == wanted_state] for data in resource: - attachment_data.append({ - 'attach_time': data.get('attach_time', None), - 'device': data.get('device', None), - 'instance_id': data.get('instance_id', None), - 'status': data.get('state', None), - 'delete_on_termination': data.get('delete_on_termination', None) - }) + attachment_data.append( + { + "attach_time": data.get("attach_time", None), + "device": data.get("device", None), + "instance_id": data.get("instance_id", None), + "status": data.get("state", None), + "delete_on_termination": data.get("delete_on_termination", None), + } + ) return attachment_data @@ -630,42 +629,42 @@ def get_attachment_data(volume_dict, wanted_state=None): def detach_volume(module, ec2_conn, volume_dict): changed = False - attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + attachment_data = get_attachment_data(volume_dict, wanted_state="attached") # The ID of the instance must be specified if you are detaching a Multi-Attach enabled volume. for attachment in attachment_data: if module.check_mode: - module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') - ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume_dict['volume_id']) - waiter = ec2_conn.get_waiter('volume_available') + module.exit_json(changed=True, msg="Would have detached volume if not in check mode.") + ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment["instance_id"], VolumeId=volume_dict["volume_id"]) + waiter = ec2_conn.get_waiter("volume_available") waiter.wait( - VolumeIds=[volume_dict['volume_id']], + VolumeIds=[volume_dict["volume_id"]], ) changed = True - volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict["volume_id"]) return volume_dict, changed def get_volume_info(module, volume, tags=None): if not tags: - tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) + tags = boto3_tag_list_to_ansible_dict(volume.get("tags")) attachment_data = get_attachment_data(volume) volume_info = { - 'create_time': volume.get('create_time'), - 'encrypted': volume.get('encrypted'), - 'id': volume.get('volume_id'), - 'iops': volume.get('iops'), - 'size': volume.get('size'), - 'snapshot_id': volume.get('snapshot_id'), - 'status': volume.get('state'), - 'type': volume.get('volume_type'), - 'zone': volume.get('availability_zone'), - 'attachment_set': attachment_data, - 'multi_attach_enabled': volume.get('multi_attach_enabled'), - 'tags': tags + "create_time": volume.get("create_time"), + "encrypted": volume.get("encrypted"), + "id": volume.get("volume_id"), + "iops": volume.get("iops"), + "size": volume.get("size"), + "snapshot_id": volume.get("snapshot_id"), + "status": volume.get("state"), + "type": volume.get("volume_type"), + "zone": volume.get("availability_zone"), + "attachment_set": attachment_data, + "multi_attach_enabled": volume.get("multi_attach_enabled"), + "tags": tags, } - volume_info['throughput'] = volume.get('throughput') + volume_info["throughput"] = volume.get("throughput") return volume_info @@ -677,8 +676,8 @@ def get_mapped_block_device(instance_dict=None, device_name=None): if not device_name: return mapped_block_device - for device in instance_dict.get('block_device_mappings', []): - if device['device_name'] == 
device_name: + for device in instance_dict.get("block_device_mappings", []): + if device["device_name"] == device_name: mapped_block_device = device break @@ -688,7 +687,7 @@ def get_mapped_block_device(instance_dict=None, device_name=None): def ensure_tags(module, connection, res_id, res_type, tags, purge_tags): if module.check_mode: return {}, True - changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ['InvalidVolume.NotFound']) + changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ["InvalidVolume.NotFound"]) final_tags = describe_ec2_tags(connection, module, res_id, res_type) return final_tags, changed @@ -699,81 +698,81 @@ def main(): instance=dict(), id=dict(), name=dict(), - volume_size=dict(type='int'), - volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), - iops=dict(type='int'), - encrypted=dict(default=False, type='bool'), + volume_size=dict(type="int"), + volume_type=dict(default="standard", choices=["standard", "gp2", "io1", "st1", "sc1", "gp3", "io2"]), + iops=dict(type="int"), + encrypted=dict(default=False, type="bool"), kms_key_id=dict(), device_name=dict(), - delete_on_termination=dict(default=False, type='bool'), - zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + delete_on_termination=dict(default=False, type="bool"), + zone=dict(aliases=["availability_zone", "aws_zone", "ec2_zone"]), snapshot=dict(), - state=dict(default='present', choices=['absent', 'present']), - tags=dict(type='dict', aliases=['resource_tags']), - modify_volume=dict(default=False, type='bool'), - throughput=dict(type='int'), - outpost_arn=dict(type='str'), - purge_tags=dict(type='bool', default=True), - multi_attach=dict(type='bool'), + state=dict(default="present", choices=["absent", "present"]), + tags=dict(type="dict", aliases=["resource_tags"]), + modify_volume=dict(default=False, type="bool"), + throughput=dict(type="int"), + outpost_arn=dict(type="str"), + purge_tags=dict(type="bool", default=True), + multi_attach=dict(type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[ - ['volume_type', 'io1', ['iops']], - ['volume_type', 'io2', ['iops']], + ["volume_type", "io1", ["iops"]], + ["volume_type", "io2", ["iops"]], ], supports_check_mode=True, ) - param_id = module.params.get('id') - name = module.params.get('name') - instance = module.params.get('instance') - volume_size = module.params.get('volume_size') - device_name = module.params.get('device_name') - zone = module.params.get('zone') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - tags = module.params.get('tags') - iops = module.params.get('iops') - volume_type = module.params.get('volume_type') - throughput = module.params.get('throughput') - multi_attach = module.params.get('multi_attach') + param_id = module.params.get("id") + name = module.params.get("name") + instance = module.params.get("instance") + volume_size = module.params.get("volume_size") + device_name = module.params.get("device_name") + zone = module.params.get("zone") + snapshot = module.params.get("snapshot") + state = module.params.get("state") + tags = module.params.get("tags") + iops = module.params.get("iops") + volume_type = module.params.get("volume_type") + throughput = module.params.get("throughput") + multi_attach = module.params.get("multi_attach") # Ensure we have the zone or can get the zone - if instance is None and zone is None and state == 'present': + if instance is 
None and zone is None and state == "present": module.fail_json(msg="You must specify either instance or zone") # Set volume detach flag - if instance == 'None' or instance == '': + if instance == "None" or instance == "": instance = None detach_vol_flag = True else: detach_vol_flag = False if iops: - if volume_type in ('gp2', 'st1', 'sc1', 'standard'): - module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.') + if volume_type in ("gp2", "st1", "sc1", "standard"): + module.fail_json(msg="IOPS is not supported for gp2, st1, sc1, or standard volumes.") - if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000): - module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.') + if volume_type == "gp3" and (int(iops) < 3000 or int(iops) > 16000): + module.fail_json(msg="For a gp3 volume type, IOPS values must be between 3000 and 16000.") - if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000): - module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.') + if volume_type in ("io1", "io2") and (int(iops) < 100 or int(iops) > 64000): + module.fail_json(msg="For io1 and io2 volume types, IOPS values must be between 100 and 64000.") if throughput: - if volume_type != 'gp3': - module.fail_json(msg='Throughput is only supported for gp3 volume.') + if volume_type != "gp3": + module.fail_json(msg="Throughput is only supported for gp3 volume.") if throughput < 125 or throughput > 1000: - module.fail_json(msg='Throughput values must be between 125 and 1000.') + module.fail_json(msg="Throughput values must be between 125 and 1000.") - if multi_attach is True and volume_type not in ('io1', 'io2'): - module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.') + if multi_attach is True and volume_type not in ("io1", "io2"): + module.fail_json(msg="multi_attach is only supported for io1 and io2 volumes.") # Set changed flag changed = False - ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) + ec2_conn = module.client("ec2", AWSRetry.jittered_backoff()) # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. @@ -788,24 +787,24 @@ def main(): # Try getting volume volume = get_volume(module, ec2_conn, fail_on_not_found=False) - if state == 'present': + if state == "present": if instance: inst = get_instance(module, ec2_conn, instance_id=instance) - zone = inst['placement']['availability_zone'] + zone = inst["placement"]["availability_zone"] # Use platform attribute to guess whether the instance is Windows or Linux if device_name is None: - if inst.get('platform', '') == 'Windows': - device_name = '/dev/xvdf' + if inst.get("platform", "") == "Windows": + device_name = "/dev/xvdf" else: - device_name = '/dev/sdf' + device_name = "/dev/sdf" # Check if there is a volume already mounted there. 
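# (Editor's note, not part of the patch; the values below are illustrative.) After
# camel_dict_to_snake_dict(), each entry of the instance's block_device_mappings
# looks roughly like:
#   {"device_name": "/dev/sdf",
#    "ebs": {"attach_time": "...", "delete_on_termination": False,
#            "status": "attached", "volume_id": "vol-0123456789abcdef0"}}
# get_mapped_block_device() returns the entry whose "device_name" matches, which
# is what lets the comparison just below detect that a different volume is
# already attached at the requested device name.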
mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name) if mapped_device: other_volume_mapped = False if volume: - if volume['volume_id'] != mapped_device['ebs']['volume_id']: + if volume["volume_id"] != mapped_device["ebs"]["volume_id"]: other_volume_mapped = True else: # No volume found so this is another volume @@ -813,11 +812,11 @@ def main(): if other_volume_mapped: module.exit_json( - msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance), - volume_id=mapped_device['ebs']['volume_id'], + msg=f"Volume mapping for {device_name} already exists on instance {instance}", + volume_id=mapped_device["ebs"]["volume_id"], found_volume=volume, device=device_name, - changed=False + changed=False, ) final_tags = None @@ -826,16 +825,20 @@ def main(): volume, changed = update_volume(module, ec2_conn, volume) if name: if not tags: - tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) - tags['Name'] = name - final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, module.params.get('purge_tags')) + tags = boto3_tag_list_to_ansible_dict(volume.get("tags")) + tags["Name"] = name + final_tags, tags_changed = ensure_tags( + module, ec2_conn, volume["volume_id"], "volume", tags, module.params.get("purge_tags") + ) else: volume, changed = create_volume(module, ec2_conn, zone=zone) if detach_vol_flag: volume, attach_changed = detach_volume(module, ec2_conn, volume_dict=volume) elif inst is not None: - volume, attach_changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name) + volume, attach_changed = attach_volume( + module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name + ) else: attach_changed = False @@ -845,18 +848,23 @@ def main(): if tags_changed or attach_changed: changed = True - module.exit_json(changed=changed, volume=volume_info, device=device_name, - volume_id=volume_info['id'], volume_type=volume_info['type']) - elif state == 'absent': + module.exit_json( + changed=changed, + volume=volume_info, + device=device_name, + volume_id=volume_info["id"], + volume_type=volume_info["type"], + ) + elif state == "absent": if not name and not param_id: - module.fail_json('A volume name or id is required for deletion') + module.fail_json("A volume name or id is required for deletion") if volume: if module.check_mode: - module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.') + module.exit_json(changed=True, msg="Would have deleted volume if not in check mode.") detach_volume(module, ec2_conn, volume_dict=volume) - changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id']) + changed = delete_volume(module, ec2_conn, volume_id=volume["volume_id"]) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py index 7cd376740..c72fb5da2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vol_info 
version_added: 1.0.0 @@ -22,12 +20,12 @@ options: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all volumes @@ -56,10 +54,9 @@ EXAMPLES = ''' filters: attachment.instance-id: "i-000111222333" register: volumes +""" -''' - -RETURN = ''' +RETURN = r""" volumes: description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume. type: list @@ -125,7 +122,7 @@ volumes: description: The throughput that the volume supports, in MiB/s. type: int sample: 131 -''' +""" try: from botocore.exceptions import ClientError @@ -134,53 +131,53 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_volume_info(volume, region): - attachment_data = [] for data in volume["attachments"]: - attachment_data.append({ - 'attach_time': data.get('attach_time', None), - 'device': data.get('device', None), - 'instance_id': data.get('instance_id', None), - 'status': data.get('state', None), - 'delete_on_termination': data.get('delete_on_termination', None) - }) + attachment_data.append( + { + "attach_time": data.get("attach_time", None), + "device": data.get("device", None), + "instance_id": data.get("instance_id", None), + "status": data.get("state", None), + "delete_on_termination": data.get("delete_on_termination", None), + } + ) volume_info = { - 'create_time': volume["create_time"], - 'id': volume["volume_id"], - 'encrypted': volume["encrypted"], - 'iops': volume["iops"] if "iops" in volume else None, - 'size': volume["size"], - 'snapshot_id': volume["snapshot_id"], - 'status': volume["state"], - 'type': volume["volume_type"], - 'zone': volume["availability_zone"], - 'region': region, - 'attachment_set': attachment_data, - 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None + "create_time": volume["create_time"], + "id": volume["volume_id"], + "encrypted": volume["encrypted"], + "iops": volume["iops"] if "iops" in volume else None, + "size": volume["size"], + "snapshot_id": volume["snapshot_id"], + "status": volume["state"], + "type": volume["volume_type"], + "zone": volume["availability_zone"], + "region": region, + "attachment_set": attachment_data, + "tags": boto3_tag_list_to_ansible_dict(volume["tags"]) if "tags" in volume else None, } - if 'throughput' in 
volume: - volume_info['throughput'] = volume["throughput"] + if "throughput" in volume: + volume_info["throughput"] = volume["throughput"] return volume_info @AWSRetry.jittered_backoff() def describe_volumes_with_backoff(connection, filters): - paginator = connection.get_paginator('describe_volumes') + paginator = connection.get_paginator("describe_volumes") return paginator.paginate(Filters=filters).build_full_result() def list_ec2_volumes(connection, module): - # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags sanitized_filters = module.params.get("filters") for key in list(sanitized_filters): @@ -194,20 +191,20 @@ def list_ec2_volumes(connection, module): module.fail_json_aws(e, msg="Failed to describe volumes.") for volume in all_volumes["Volumes"]: - volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags']) + volume = camel_dict_to_snake_dict(volume, ignore_list=["Tags"]) volume_dict_array.append(get_volume_info(volume, module.region)) module.exit_json(volumes=volume_dict_array) def main(): - argument_spec = dict(filters=dict(default={}, type='dict')) + argument_spec = dict(filters=dict(default={}, type="dict")) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client("ec2") list_ec2_volumes(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py index edfdf7be3..7ed8865ca 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_dhcp_option version_added: 1.0.0 @@ -91,13 +89,13 @@ options: notes: - Support for I(purge_tags) was added in release 2.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -RETURN = """ +RETURN = r""" changed: description: Whether the dhcp options were changed type: bool @@ -170,26 +168,25 @@ dhcp_config: sample: 2 """ -EXAMPLES = """ +EXAMPLES = r""" # Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing # DHCP option set that may have been attached to that VPC. - amazon.aws.ec2_vpc_dhcp_option: domain_name: "foo.example.com" region: us-east-1 dns_servers: - - 10.0.0.1 - - 10.0.1.1 + - 10.0.0.1 + - 10.0.1.1 ntp_servers: - - 10.0.0.2 - - 10.0.1.2 + - 10.0.0.2 + - 10.0.1.2 netbios_name_servers: - - 10.0.0.1 - - 10.0.1.1 + - 10.0.0.1 + - 10.0.1.1 netbios_node_type: 2 vpc_id: vpc-123456 - delete_old: True - inherit_existing: False - + delete_old: true + inherit_existing: false # Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but # keep any other existing settings. Also, keep the old DHCP option set around. 
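[Editor's note between hunks, not part of the patch: the parameters set in these
examples (domain_name, dns_servers, ntp_servers, ...) are converted by the module's
create_dhcp_config(), shown in the hunks further down, into the boto3
DhcpConfigurations shape. A minimal standalone Python sketch of that mapping,
using hypothetical values:]

# Sketch only: mirrors the Key/Values structure built by create_dhcp_config().
def to_dhcp_configurations(domain_name=None, dns_servers=None):
    config = []
    if domain_name is not None:
        config.append({"Key": "domain-name", "Values": [{"Value": domain_name}]})
    if dns_servers is not None:
        config.append({"Key": "domain-name-servers", "Values": [{"Value": s} for s in dns_servers]})
    return config

# to_dhcp_configurations("foo.example.com", ["10.0.0.1", "10.0.1.1"]) returns:
#   [{"Key": "domain-name", "Values": [{"Value": "foo.example.com"}]},
#    {"Key": "domain-name-servers", "Values": [{"Value": "10.0.0.1"}, {"Value": "10.0.1.1"}]}]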
@@ -199,9 +196,8 @@ EXAMPLES = """ - "{{groups['dns-primary']}}" - "{{groups['dns-secondary']}}" vpc_id: vpc-123456 - inherit_existing: True - delete_old: False - + inherit_existing: true + delete_old: false ## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags ## but do not assign to a VPC @@ -230,7 +226,6 @@ EXAMPLES = """ region: us-east-1 dhcp_options_id: dopt-12345678 vpc_id: vpc-123456 - """ try: @@ -238,53 +233,59 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications def fetch_dhcp_options_for_vpc(client, module, vpc_id): try: - vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])['Vpcs'] + vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])["Vpcs"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe vpc {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"Unable to describe vpc {vpc_id}") if len(vpcs) != 1: return None try: - dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]['DhcpOptionsId']]) + dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]["DhcpOptionsId"]]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe dhcp option {0}".format(vpcs[0]['DhcpOptionsId'])) + module.fail_json_aws(e, msg=f"Unable to describe dhcp option {vpcs[0]['DhcpOptionsId']}") - if len(dhcp_options['DhcpOptions']) != 1: + if len(dhcp_options["DhcpOptions"]) != 1: return None - return dhcp_options['DhcpOptions'][0]['DhcpConfigurations'], dhcp_options['DhcpOptions'][0]['DhcpOptionsId'] + return dhcp_options["DhcpOptions"][0]["DhcpConfigurations"], dhcp_options["DhcpOptions"][0]["DhcpOptionsId"] def remove_dhcp_options_by_id(client, module, dhcp_options_id): changed = False # First, check if this dhcp option is associated to any other vpcs try: - associations = client.describe_vpcs(aws_retry=True, Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_options_id]}]) + associations = client.describe_vpcs( + aws_retry=True, Filters=[{"Name": "dhcp-options-id", 
"Values": [dhcp_options_id]}] + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe VPC associations for dhcp option id {0}".format(dhcp_options_id)) - if len(associations['Vpcs']) > 0: + module.fail_json_aws(e, msg=f"Unable to describe VPC associations for dhcp option id {dhcp_options_id}") + if len(associations["Vpcs"]) > 0: return changed changed = True if not module.check_mode: try: client.delete_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id) - except is_boto3_error_code('InvalidDhcpOptionsID.NotFound'): + except is_boto3_error_code("InvalidDhcpOptionsID.NotFound"): return False - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete dhcp option {0}".format(dhcp_options_id)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to delete dhcp option {dhcp_options_id}") return changed @@ -299,14 +300,14 @@ def match_dhcp_options(client, module, new_config): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to describe dhcp options") - for dopts in all_dhcp_options['DhcpOptions']: - if module.params['tags']: + for dopts in all_dhcp_options["DhcpOptions"]: + if module.params["tags"]: # If we were given tags, try to match on them - boto_tags = ansible_dict_to_boto3_tag_list(module.params['tags']) - if dopts['DhcpConfigurations'] == new_config and dopts['Tags'] == boto_tags: - return True, dopts['DhcpOptionsId'] - elif dopts['DhcpConfigurations'] == new_config: - return True, dopts['DhcpOptionsId'] + boto_tags = ansible_dict_to_boto3_tag_list(module.params["tags"]) + if dopts["DhcpConfigurations"] == new_config and dopts["Tags"] == boto_tags: + return True, dopts["DhcpOptionsId"] + elif dopts["DhcpConfigurations"] == new_config: + return True, dopts["DhcpOptionsId"] return False, None @@ -323,25 +324,25 @@ def create_dhcp_config(module): """ new_config = [] params = module.params - if params['domain_name'] is not None: - new_config.append({'Key': 'domain-name', 'Values': [{'Value': params['domain_name']}]}) - if params['dns_servers'] is not None: + if params["domain_name"] is not None: + new_config.append({"Key": "domain-name", "Values": [{"Value": params["domain_name"]}]}) + if params["dns_servers"] is not None: dns_server_list = [] - for server in params['dns_servers']: - dns_server_list.append({'Value': server}) - new_config.append({'Key': 'domain-name-servers', 'Values': dns_server_list}) - if params['ntp_servers'] is not None: + for server in params["dns_servers"]: + dns_server_list.append({"Value": server}) + new_config.append({"Key": "domain-name-servers", "Values": dns_server_list}) + if params["ntp_servers"] is not None: ntp_server_list = [] - for server in params['ntp_servers']: - ntp_server_list.append({'Value': server}) - new_config.append({'Key': 'ntp-servers', 'Values': ntp_server_list}) - if params['netbios_name_servers'] is not None: + for server in params["ntp_servers"]: + ntp_server_list.append({"Value": server}) + new_config.append({"Key": "ntp-servers", "Values": ntp_server_list}) + if params["netbios_name_servers"] is not None: netbios_server_list = [] - for server in params['netbios_name_servers']: - netbios_server_list.append({'Value': server}) - new_config.append({'Key': 
'netbios-name-servers', 'Values': netbios_server_list}) - if params['netbios_node_type'] is not None: - new_config.append({'Key': 'netbios-node-type', 'Values': params['netbios_node_type']}) + for server in params["netbios_name_servers"]: + netbios_server_list.append({"Value": server}) + new_config.append({"Key": "netbios-name-servers", "Values": netbios_server_list}) + if params["netbios_node_type"] is not None: + new_config.append({"Key": "netbios-node-type", "Values": params["netbios_node_type"]}) return new_config @@ -360,20 +361,22 @@ def create_dhcp_option_set(client, module, new_config): create_config = [] tags_list = [] - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']: + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers"]: if desired_config.get(option): - create_config.append({'Key': option, 'Values': desired_config[option]}) - if desired_config.get('netbios-node-type'): + create_config.append({"Key": option, "Values": desired_config[option]}) + if desired_config.get("netbios-node-type"): # We need to listify this one - create_config.append({'Key': 'netbios-node-type', 'Values': [desired_config['netbios-node-type']]}) + create_config.append({"Key": "netbios-node-type", "Values": [desired_config["netbios-node-type"]]}) - if module.params.get('tags'): - tags_list = boto3_tag_specifications(module.params['tags'], ['dhcp-options']) + if module.params.get("tags"): + tags_list = boto3_tag_specifications(module.params["tags"], ["dhcp-options"]) try: if not module.check_mode: - dhcp_options = client.create_dhcp_options(aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list) - return changed, dhcp_options['DhcpOptions']['DhcpOptionsId'] + dhcp_options = client.create_dhcp_options( + aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list + ) + return changed, dhcp_options["DhcpOptions"]["DhcpOptionsId"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to create dhcp option set") @@ -381,7 +384,7 @@ def create_dhcp_option_set(client, module, new_config): def find_opt_index(config, option): - return (next((i for i, item in enumerate(config) if item["Key"] == option), None)) + return next((i for i, item in enumerate(config) if item["Key"] == option), None) def inherit_dhcp_config(existing_config, new_config): @@ -394,8 +397,7 @@ def inherit_dhcp_config(existing_config, new_config): the right list index for a given config option first. 
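    Editor's illustration of the inherit step (hypothetical values, not taken
    from the patch): given
        existing_config = [{'Key': 'domain-name', 'Values': [{'Value': 'foo.example.com'}]},
                           {'Key': 'ntp-servers', 'Values': [{'Value': '10.0.0.2'}]}]
        new_config      = [{'Key': 'ntp-servers', 'Values': [{'Value': '10.0.9.9'}]}]
    find_opt_index() locates 'ntp-servers' in both lists, so new_config keeps its
    own value, while 'domain-name' appears only in existing_config and is
    therefore inherited into new_config.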
""" changed = False - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', - 'netbios-name-servers', 'netbios-node-type']: + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers", "netbios-node-type"]: existing_index = find_opt_index(existing_config, option) new_index = find_opt_index(new_config, option) # `if existing_index` evaluates to False on index 0, so be very specific and verbose @@ -414,15 +416,21 @@ def get_dhcp_options_info(client, module, dhcp_options_id): return None try: - dhcp_option_info = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id]) + dhcp_option_info = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidDhcpOptionID.NotFound"])( + client.describe_dhcp_options, + )( + DhcpOptionsIds=[dhcp_options_id], + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to describe dhcp options") - dhcp_options_set = dhcp_option_info['DhcpOptions'][0] - dhcp_option_info = {'DhcpOptionsId': dhcp_options_set['DhcpOptionsId'], - 'DhcpConfigurations': dhcp_options_set['DhcpConfigurations'], - 'Tags': boto3_tag_list_to_ansible_dict(dhcp_options_set.get('Tags', [{'Value': '', 'Key': 'Name'}]))} - return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=['Tags']) + dhcp_options_set = dhcp_option_info["DhcpOptions"][0] + dhcp_option_info = { + "DhcpOptionsId": dhcp_options_set["DhcpOptionsId"], + "DhcpConfigurations": dhcp_options_set["DhcpConfigurations"], + "Tags": boto3_tag_list_to_ansible_dict(dhcp_options_set.get("Tags", [{"Value": "", "Key": "Name"}])), + } + return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=["Tags"]) def associate_options(client, module, vpc_id, dhcp_options_id): @@ -430,38 +438,34 @@ def associate_options(client, module, vpc_id, dhcp_options_id): if not module.check_mode: client.associate_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id, VpcId=vpc_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to associate dhcp option {0} to VPC {1}".format(dhcp_options_id, vpc_id)) + module.fail_json_aws(e, msg=f"Unable to associate dhcp option {dhcp_options_id} to VPC {vpc_id}") def main(): argument_spec = dict( - dhcp_options_id=dict(type='str', default=None), - domain_name=dict(type='str', default=None), - dns_servers=dict(type='list', elements='str', default=None), - ntp_servers=dict(type='list', elements='str', default=None), - netbios_name_servers=dict(type='list', elements='str', default=None), - netbios_node_type=dict(type='int', default=None), - vpc_id=dict(type='str', default=None), - delete_old=dict(type='bool', default=True), - inherit_existing=dict(type='bool', default=False), - tags=dict(type='dict', default=None, aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent']) + dhcp_options_id=dict(type="str", default=None), + domain_name=dict(type="str", default=None), + dns_servers=dict(type="list", elements="str", default=None), + ntp_servers=dict(type="list", elements="str", default=None), + netbios_name_servers=dict(type="list", elements="str", default=None), + netbios_node_type=dict(type="int", default=None), + vpc_id=dict(type="str", default=None), + delete_old=dict(type="bool", default=True), + inherit_existing=dict(type="bool", default=False), + tags=dict(type="dict", default=None, aliases=["resource_tags"]), + 
purge_tags=dict(default=True, type="bool"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - check_boto3=False, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False, supports_check_mode=True) - vpc_id = module.params['vpc_id'] - delete_old = module.params['delete_old'] - inherit_existing = module.params['inherit_existing'] - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - state = module.params['state'] - dhcp_options_id = module.params['dhcp_options_id'] + vpc_id = module.params["vpc_id"] + delete_old = module.params["delete_old"] + inherit_existing = module.params["inherit_existing"] + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + state = module.params["state"] + dhcp_options_id = module.params["dhcp_options_id"] found = False changed = False @@ -469,16 +473,14 @@ def main(): existing_config = None existing_id = None - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - module.deprecate("The 'new_config' return key is deprecated and will be replaced by 'dhcp_config'. Both values are returned for now.", - date='2022-12-01', collection_name='amazon.aws') - if state == 'absent': + if state == "absent": if not dhcp_options_id: # Look up the option id first by matching the supplied options dhcp_options_id = match_dhcp_options(client, module, new_config) changed = remove_dhcp_options_by_id(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options={}, dhcp_options={}) + module.exit_json(changed=changed, dhcp_options={}, dhcp_config={}) if not dhcp_options_id: # If we were given a vpc_id then we need to look at the configuration on that @@ -492,11 +494,22 @@ def main(): if new_config == existing_config: dhcp_options_id = existing_id if tags or purge_tags: - changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options', - tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, + module, + dhcp_options_id, + resource_type="dhcp-options", + tags=tags, + purge_tags=purge_tags, + ) return_config = normalize_ec2_vpc_dhcp_config(new_config) results = get_dhcp_options_info(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results) + module.exit_json( + changed=changed, + dhcp_options_id=dhcp_options_id, + dhcp_options=results, + dhcp_config=return_config, + ) # If no vpc_id was given, or the options don't match then look for an existing set using tags found, dhcp_options_id = match_dhcp_options(client, module, new_config) @@ -517,21 +530,22 @@ def main(): changed, dhcp_options_id = create_dhcp_option_set(client, module, new_config) else: if tags or purge_tags: - changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options', - tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, dhcp_options_id, resource_type="dhcp-options", tags=tags, purge_tags=purge_tags + ) # If we were given a vpc_id, then attach the options we now have to that before we finish if vpc_id: associate_options(client, module, vpc_id, dhcp_options_id) - changed = (changed or True) + changed = changed or True if delete_old and existing_id: remove_dhcp_options_by_id(client, module, existing_id) return_config = 
normalize_ec2_vpc_dhcp_config(new_config) results = get_dhcp_options_info(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config) + module.exit_json(changed=changed, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py index c5058bd7a..bb51377b5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_dhcp_option_info version_added: 1.0.0 @@ -33,12 +31,12 @@ options: type: bool default: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all DHCP Option sets for an account or profile @@ -52,7 +50,7 @@ EXAMPLES = ''' region: ap-southeast-2 profile: production filters: - "tag:Name": "abc-123" + "tag:Name": "abc-123" register: dhcp_info - name: Gather information about a specific DHCP Option set by DhcpOptionId @@ -61,10 +59,9 @@ EXAMPLES = ''' profile: production dhcp_options_ids: dopt-123fece2 register: dhcp_info +""" -''' - -RETURN = ''' +RETURN = r""" dhcp_options: description: The DHCP options created, associated or found. returned: always @@ -150,7 +147,7 @@ changed: description: True if listing the dhcp options succeeds. 
type: bool returned: always -''' +""" try: import botocore @@ -159,52 +156,56 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_dhcp_options_info(dhcp_option): - dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'], - 'DhcpConfigurations': dhcp_option['DhcpConfigurations'], - 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))} + dhcp_option_info = { + "DhcpOptionsId": dhcp_option["DhcpOptionsId"], + "DhcpConfigurations": dhcp_option["DhcpConfigurations"], + "Tags": boto3_tag_list_to_ansible_dict(dhcp_option.get("Tags", [{"Value": "", "Key": "Name"}])), + } return dhcp_option_info def list_dhcp_options(client, module): - params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters'))) + params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get("filters"))) if module.params.get("dry_run"): - params['DryRun'] = True + params["DryRun"] = True if module.params.get("dhcp_options_ids"): - params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids") + params["DhcpOptionsIds"] = module.params.get("dhcp_options_ids") try: all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - normalized_config = [normalize_ec2_vpc_dhcp_config(config['DhcpConfigurations']) for config in all_dhcp_options['DhcpOptions']] - raw_config = [camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=['Tags']) for option in all_dhcp_options['DhcpOptions']] + normalized_config = [ + normalize_ec2_vpc_dhcp_config(config["DhcpConfigurations"]) for config in all_dhcp_options["DhcpOptions"] + ] + raw_config = [ + camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=["Tags"]) + for option in all_dhcp_options["DhcpOptions"] + ] return raw_config, normalized_config def main(): argument_spec = dict( - filters=dict(type='dict', default={}), - dry_run=dict(type='bool', default=False), - dhcp_options_ids=dict(type='list', elements='str'), + filters=dict(type="dict", default={}), + dry_run=dict(type="bool", default=False), + dhcp_options_ids=dict(type="list", elements="str"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) # call your function here results, normalized_config = list_dhcp_options(client, module) @@ -212,5 +213,5 @@ def main(): 
 module.exit_json(dhcp_options=results, dhcp_config=normalized_config) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py index 080610eb6..c894412eb 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint short_description: Create and delete AWS VPC endpoints version_added: 1.0.0 @@ -55,24 +53,10 @@ options: description: - A properly formatted JSON policy as string, see U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813). - Cannot be used with I(policy_file). - Option when creating an endpoint. If not provided AWS will utilise a default policy which provides full access to the service. required: false type: json - policy_file: - description: - - The path to the properly json formatted policy file, see - U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) - on how to use it properly. Cannot be used with I(policy). - - Option when creating an endpoint. If not provided AWS will - utilise a default policy which provides full access to the service. - - This option has been deprecated and will be removed after 2022-12-01 - to maintain the existing functionality please use the I(policy) option - and a file lookup. - required: false - aliases: [ "policy_path" ] - type: path state: description: - C(present) to ensure resource is created. @@ -122,14 +106,16 @@ author: - Karen Cheng (@Etherdaemon) notes: - Support for I(tags) and I(purge_tags) was added in release 1.5.0. + - The C(policy_file) parameter was removed in release 6.0.0; please use the + I(policy) option and a file lookup instead. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Create new vpc endpoint with a json template for policy @@ -155,26 +141,14 @@ EXAMPLES = r''' - rtb-87654321 register: new_vpc_endpoint -- name: Create new vpc endpoint with json file - amazon.aws.ec2_vpc_endpoint: - state: present - region: ap-southeast-2 - vpc_id: vpc-12345678 - service: com.amazonaws.ap-southeast-2.s3 - policy_file: "{{ role_path }}/files/endpoint_policy.json" - route_table_ids: - - rtb-12345678 - - rtb-87654321 - register: new_vpc_endpoint - - name: Delete newly created vpc endpoint amazon.aws.ec2_vpc_endpoint: state: absent vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" region: ap-southeast-2 -''' +""" -RETURN = r''' +RETURN = r""" endpoints: description: The resulting endpoints from the module call returned: success @@ -206,7 +180,7 @@ endpoints: "vpc_id": "vpc-abbad0d0" } ] -''' +""" import datetime import json @@ -217,29 +191,29 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.six import string_types from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_endpoints(client, module, endpoint_id=None): params = dict() if endpoint_id: - params['VpcEndpointIds'] = [endpoint_id] + params["VpcEndpointIds"] = [endpoint_id] else: filters = list() - if module.params.get('service'): - filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]}) - if module.params.get('vpc_id'): - filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]}) - params['Filters'] = filters + if module.params.get("service"): + filters.append({"Name": "service-name", "Values": [module.params.get("service")]}) + if module.params.get("vpc_id"): + filters.append({"Name": "vpc-id", "Values": [module.params.get("vpc_id")]}) + params["Filters"] = filters try: result = client.describe_vpc_endpoints(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -257,119 +231,125 @@ def match_endpoints(route_table_ids, service_name, vpc_id, endpoint): if route_table_ids: sorted_route_table_ids = sorted(route_table_ids) - if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: - sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds']) + if endpoint["VpcId"] == vpc_id and endpoint["ServiceName"] == service_name: + sorted_endpoint_rt_ids = sorted(endpoint["RouteTableIds"]) if sorted_endpoint_rt_ids == 
sorted_route_table_ids: found = True return found def setup_creation(client, module): - endpoint_id = module.params.get('vpc_endpoint_id') - route_table_ids = module.params.get('route_table_ids') - service_name = module.params.get('service') - vpc_id = module.params.get('vpc_id') + endpoint_id = module.params.get("vpc_endpoint_id") + route_table_ids = module.params.get("route_table_ids") + service_name = module.params.get("service") + vpc_id = module.params.get("vpc_id") changed = False if not endpoint_id: # Try to use the module parameters to match any existing endpoints all_endpoints = get_endpoints(client, module, endpoint_id) - if len(all_endpoints['VpcEndpoints']) > 0: - for endpoint in all_endpoints['VpcEndpoints']: + if len(all_endpoints["VpcEndpoints"]) > 0: + for endpoint in all_endpoints["VpcEndpoints"]: if match_endpoints(route_table_ids, service_name, vpc_id, endpoint): - endpoint_id = endpoint['VpcEndpointId'] + endpoint_id = endpoint["VpcEndpointId"] break if endpoint_id: # If we have an endpoint now, just ensure tags and exit - if module.params.get('tags'): - changed |= ensure_ec2_tags(client, module, endpoint_id, - resource_type='vpc-endpoint', - tags=module.params.get('tags'), - purge_tags=module.params.get('purge_tags')) - normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0] - return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags']) + if module.params.get("tags"): + changed |= ensure_ec2_tags( + client, + module, + endpoint_id, + resource_type="vpc-endpoint", + tags=module.params.get("tags"), + purge_tags=module.params.get("purge_tags"), + ) + normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)["VpcEndpoints"][0] + return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=["Tags"]) changed, result = create_vpc_endpoint(client, module) - return changed, camel_dict_to_snake_dict(result, ignore_list=['Tags']) + return changed, camel_dict_to_snake_dict(result, ignore_list=["Tags"]) def create_vpc_endpoint(client, module): params = dict() changed = False token_provided = False - params['VpcId'] = module.params.get('vpc_id') - params['VpcEndpointType'] = module.params.get('vpc_endpoint_type') - params['ServiceName'] = module.params.get('service') + params["VpcId"] = module.params.get("vpc_id") + params["VpcEndpointType"] = module.params.get("vpc_endpoint_type") + params["ServiceName"] = module.params.get("service") - if module.params.get('vpc_endpoint_type') != 'Gateway' and module.params.get('route_table_ids'): + if module.params.get("vpc_endpoint_type") != "Gateway" and module.params.get("route_table_ids"): module.fail_json(msg="Route table IDs are only supported for Gateway type VPC Endpoint.") if module.check_mode: changed = True - result = 'Would have created VPC Endpoint if not in check mode' + result = "Would have created VPC Endpoint if not in check mode" module.exit_json(changed=changed, result=result) - if module.params.get('route_table_ids'): - params['RouteTableIds'] = module.params.get('route_table_ids') + if module.params.get("route_table_ids"): + params["RouteTableIds"] = module.params.get("route_table_ids") - if module.params.get('vpc_endpoint_subnets'): - params['SubnetIds'] = module.params.get('vpc_endpoint_subnets') + if module.params.get("vpc_endpoint_subnets"): + params["SubnetIds"] = module.params.get("vpc_endpoint_subnets") - if module.params.get('vpc_endpoint_security_groups'): - params['SecurityGroupIds'] = 
module.params.get('vpc_endpoint_security_groups') + if module.params.get("vpc_endpoint_security_groups"): + params["SecurityGroupIds"] = module.params.get("vpc_endpoint_security_groups") - if module.params.get('client_token'): + if module.params.get("client_token"): token_provided = True request_time = datetime.datetime.utcnow() - params['ClientToken'] = module.params.get('client_token') + params["ClientToken"] = module.params.get("client_token") policy = None - if module.params.get('policy'): + if module.params.get("policy"): try: - policy = json.loads(module.params.get('policy')) + policy = json.loads(module.params.get("policy")) except ValueError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - - elif module.params.get('policy_file'): - try: - with open(module.params.get('policy_file'), 'r') as json_data: - policy = json.load(json_data) - except (OSError, json.JSONDecodeError) as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json(msg=str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) if policy: - params['PolicyDocument'] = json.dumps(policy) + params["PolicyDocument"] = json.dumps(policy) - if module.params.get('tags'): - params["TagSpecifications"] = boto3_tag_specifications(module.params.get('tags'), ['vpc-endpoint']) + if module.params.get("tags"): + params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), ["vpc-endpoint"]) try: changed = True - result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint'] - if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)): + result = client.create_vpc_endpoint(aws_retry=True, **params)["VpcEndpoint"] + if token_provided and (request_time > result["creation_timestamp"].replace(tzinfo=None)): changed = False - elif module.params.get('wait') and not module.check_mode: + elif module.params.get("wait") and not module.check_mode: try: - waiter = get_waiter(client, 'vpc_endpoint_exists') - waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15)) + waiter = get_waiter(client, "vpc_endpoint_exists") + waiter.wait( + VpcEndpointIds=[result["VpcEndpointId"]], + WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get("wait_timeout") // 15), + ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(msg='Error waiting for vpc endpoint to become available - please check the AWS console') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failure while waiting for status') - - except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except + module.fail_json_aws( + msg="Error waiting for vpc endpoint to become available - please check the AWS console" + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failure while waiting for status") + + except is_boto3_error_code("IdempotentParameterMismatch"): # pylint: disable=duplicate-except module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") - except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except + except is_boto3_error_code("RouteAlreadyExists"): # pylint: 
disable=duplicate-except module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to create VPC.") # describe and normalize iso datetime fields in result after adding tags - normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0] + normalized_result = get_endpoints(client, module, endpoint_id=result["VpcEndpointId"])["VpcEndpoints"][0] return changed, normalized_result @@ -379,36 +359,44 @@ def setup_removal(client, module): if module.check_mode: try: - exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')]) + exists = client.describe_vpc_endpoints( + aws_retry=True, VpcEndpointIds=[module.params.get("vpc_endpoint_id")] + ) if exists: - result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'} + result = {"msg": "Would have deleted VPC Endpoint if not in check mode"} changed = True - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - result = {'msg': 'Endpoint does not exist, nothing to delete.'} + except is_boto3_error_code("InvalidVpcEndpointId.NotFound"): + result = {"msg": "Endpoint does not exist, nothing to delete."} changed = False - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") return changed, result - if isinstance(module.params.get('vpc_endpoint_id'), string_types): - params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')] + if isinstance(module.params.get("vpc_endpoint_id"), string_types): + params["VpcEndpointIds"] = [module.params.get("vpc_endpoint_id")] else: - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id') + params["VpcEndpointIds"] = module.params.get("vpc_endpoint_id") try: - result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful'] - if len(result) < len(params['VpcEndpointIds']): + result = client.delete_vpc_endpoints(aws_retry=True, **params)["Unsuccessful"] + if len(result) < len(params["VpcEndpointIds"]): changed = True # For some reason delete_vpc_endpoints doesn't throw exceptions it # returns a list of failed 'results' instead. 
Throw these so we can # catch them the way we expect for r in result: try: - raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') - except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): + raise botocore.exceptions.ClientError(r, "delete_vpc_endpoints") + except is_boto3_error_code("InvalidVpcEndpoint.NotFound"): continue - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "Failed to delete VPC endpoint") return changed, result @@ -416,61 +404,71 @@ def setup_removal(client, module): def main(): argument_spec = dict( vpc_id=dict(), - vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']), - vpc_endpoint_security_groups=dict(type='list', elements='str'), - vpc_endpoint_subnets=dict(type='list', elements='str'), + vpc_endpoint_type=dict(default="Gateway", choices=["Interface", "Gateway", "GatewayLoadBalancer"]), + vpc_endpoint_security_groups=dict(type="list", elements="str"), + vpc_endpoint_subnets=dict(type="list", elements="str"), service=dict(), - policy=dict(type='json'), - policy_file=dict(type='path', aliases=['policy_path']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - route_table_ids=dict(type='list', elements='str'), + policy=dict(type="json"), + state=dict(default="present", choices=["present", "absent"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=320, required=False), + route_table_ids=dict(type="list", elements="str"), vpc_endpoint_id=dict(), client_token=dict(no_log=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['policy', 'policy_file']], required_if=[ - ['state', 'present', ['vpc_id', 'service']], - ['state', 'absent', ['vpc_endpoint_id']], + ["state", "present", ["vpc_id", "service"]], + ["state", "absent", ["vpc_endpoint_id"]], ], ) # Validate Requirements - state = module.params.get('state') - - if module.params.get('policy_file'): - module.deprecate('The policy_file option has been deprecated and' - ' will be removed after 2022-12-01', - date='2022-12-01', collection_name='amazon.aws') - - if module.params.get('vpc_endpoint_type'): - if module.params.get('vpc_endpoint_type') == 'Gateway': - if module.params.get('vpc_endpoint_subnets') or module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway endpoint type") - - if module.params.get('vpc_endpoint_type') == 'GatewayLoadBalancer': - if module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type") - - if module.params.get('vpc_endpoint_type') == 'Interface': - if module.params.get('vpc_endpoint_subnets') and not module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and vpc_endpoint_subnets is defined") - if not 
module.params.get('vpc_endpoint_subnets') and module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and vpc_endpoint_security_groups is defined") + state = module.params.get("state") + + if module.params.get("vpc_endpoint_type"): + if module.params.get("vpc_endpoint_type") == "Gateway": + if module.params.get("vpc_endpoint_subnets") or module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway" + " endpoint type" + ) + ) + + if module.params.get("vpc_endpoint_type") == "GatewayLoadBalancer": + if module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type" + ) + + if module.params.get("vpc_endpoint_type") == "Interface": + if module.params.get("vpc_endpoint_subnets") and not module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and" + " vpc_endpoint_subnets is defined" + ) + ) + if not module.params.get("vpc_endpoint_subnets") and module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and" + " vpc_endpoint_security_groups is defined" + ) + ) try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Ensure resource is present - if state == 'present': + if state == "present": (changed, results) = setup_creation(ec2, module) else: (changed, results) = setup_removal(ec2, module) @@ -478,5 +476,5 @@ def main(): module.exit_json(changed=changed, result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py index 11a362812..e94cf1a94 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py @@ -1,30 +1,16 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint_info short_description: Retrieves AWS VPC endpoints details using AWS methods version_added: 1.0.0 description: - Gets various details related to AWS VPC endpoints. options: - query: - description: - - Defaults to C(endpoints). - - Specifies the query action to take. - - I(query=endpoints) returns information about AWS VPC endpoints. - - Retrieving information about services using I(query=services) has been - deprecated in favour of the M(amazon.aws.ec2_vpc_endpoint_service_info) module. - - The I(query) option has been deprecated and will be removed after 2022-12-01. 
- required: False - choices: - services - endpoints - type: str vpc_endpoint_ids: description: - The IDs of specific endpoints to retrieve the details of. @@ -37,30 +23,27 @@ options: for possible filters. type: dict default: {} -author: Karen Cheng (@Etherdaemon) +author: + - Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +notes: + - Support for the C(query) parameter was dropped in release 6.0.0. This module now only queries + for endpoints. Information about endpoint services can be retrieved using the + M(amazon.aws.ec2_vpc_endpoint_service_info) module. +""" + +EXAMPLES = r""" # Simple example of listing all supported AWS services for VPC endpoints -- name: List supported AWS endpoint services - amazon.aws.ec2_vpc_endpoint_info: - query: services - region: ap-southeast-2 - register: supported_endpoint_services - - name: Get all endpoints in ap-southeast-2 region amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 register: existing_endpoints - name: Get all endpoints with specific filters amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 filters: vpc-id: @@ -73,27 +56,17 @@ EXAMPLES = r''' - name: Get details on specific endpoint amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 vpc_endpoint_ids: - vpce-12345678 register: endpoint_details -''' +""" -RETURN = r''' -service_names: - description: AWS VPC endpoint service names. - returned: I(query) is C(services) - type: list - elements: str - sample: - service_names: - - com.amazonaws.ap-southeast-2.s3 +RETURN = r""" vpc_endpoints: description: - - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp, - policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id. - returned: I(query) is C(endpoints) + - A list of matching endpoints. 
+ returned: always type: list elements: dict contains: @@ -197,7 +170,7 @@ vpc_endpoints: state: "available" vpc_endpoint_id: "vpce-abbad0d0" vpc_id: "vpc-1111ffff" -''' +""" try: import botocore @@ -206,47 +179,34 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def _describe_endpoints(client, **params): - paginator = client.get_paginator('describe_vpc_endpoints') + paginator = client.get_paginator("describe_vpc_endpoints") return paginator.paginate(**params).build_full_result() -@AWSRetry.jittered_backoff() -def _describe_endpoint_services(client, **params): - paginator = client.get_paginator('describe_vpc_endpoint_services') - return paginator.paginate(**params).build_full_result() - - -def get_supported_services(client, module): - try: - services = _describe_endpoint_services(client) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get endpoint servicess") - - results = list(services['ServiceNames']) - return dict(service_names=results) - - def get_endpoints(client, module): results = list() params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('vpc_endpoint_ids'): - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("vpc_endpoint_ids"): + params["VpcEndpointIds"] = module.params.get("vpc_endpoint_ids") try: - results = _describe_endpoints(client, **params)['VpcEndpoints'] + results = _describe_endpoints(client, **params)["VpcEndpoints"] results = normalize_boto3_result(results) - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidVpcEndpointId.NotFound"): + module.exit_json(msg=f"VpcEndpoint {module.params.get('vpc_endpoint_ids')} does not exist", vpc_endpoints=[]) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) @@ -254,45 +214,22 @@ def get_endpoints(client, module): def main(): argument_spec = dict( - 
query=dict(choices=['services', 'endpoints'], required=False), - filters=dict(default={}, type='dict'), - vpc_endpoint_ids=dict(type='list', elements='str'), + filters=dict(default={}, type="dict"), + vpc_endpoint_ids=dict(type="list", elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - query = module.params.get('query') - if query == 'endpoints': - module.deprecate('The query option has been deprecated and' - ' will be removed after 2022-12-01. Searching for' - ' `endpoints` is now the default and after' - ' 2022-12-01 this module will only support fetching' - ' endpoints.', - date='2022-12-01', collection_name='amazon.aws') - elif query == 'services': - module.deprecate('Support for fetching service information with this ' - 'module has been deprecated and will be removed after' - ' 2022-12-01. ' - 'Please use the ec2_vpc_endpoint_service_info module ' - 'instead.', date='2022-12-01', - collection_name='amazon.aws') - else: - query = 'endpoints' + module.fail_json_aws(e, msg="Failed to connect to AWS") - invocations = { - 'services': get_supported_services, - 'endpoints': get_endpoints, - } - results = invocations[query](connection, module) + results = get_endpoints(connection, module) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py index fefd39421..e462cfefd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint_service_info short_description: Retrieves AWS VPC endpoint service details version_added: 1.5.0 @@ -28,20 +27,20 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all supported AWS services for VPC endpoints - name: List supported AWS endpoint services amazon.aws.ec2_vpc_endpoint_service_info: region: ap-southeast-2 register: supported_endpoint_services -''' +""" -RETURN = r''' +RETURN = r""" service_names: description: List of supported AWS VPC endpoint service names. returned: success @@ -110,7 +109,7 @@ service_details: - The verification state of the VPC endpoint service. - Consumers of an endpoint service cannot use the private name when the state is not C(verified). 
type: str -''' +""" try: import botocore @@ -119,62 +118,62 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list # We're using a paginator so we can't use the client decorators @AWSRetry.jittered_backoff() def get_services(client, module): - paginator = client.get_paginator('describe_vpc_endpoint_services') + paginator = client.get_paginator("describe_vpc_endpoint_services") params = {} if module.params.get("filters"): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) if module.params.get("service_names"): - params['ServiceNames'] = module.params.get("service_names") + params["ServiceNames"] = module.params.get("service_names") results = paginator.paginate(**params).build_full_result() return results def normalize_service(service): - normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags']) - normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags')) + normalized = camel_dict_to_snake_dict(service, ignore_list=["Tags"]) + normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get("Tags")) return normalized def normalize_result(result): normalized = {} - normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')] - normalized['service_names'] = result.get('ServiceNames', []) + normalized["service_details"] = [normalize_service(service) for service in result.get("ServiceDetails")] + normalized["service_names"] = result.get("ServiceNames", []) return normalized def main(): argument_spec = dict( - filters=dict(default={}, type='dict'), - service_names=dict(type='list', elements='str'), + filters=dict(default={}, type="dict"), + service_names=dict(type="list", elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - client = module.client('ec2') + client = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") try: results = get_services(client, module) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to retrieve service details') + module.fail_json_aws(e, msg="Failed to connect to retrieve service details") normalized_result = normalize_result(results) module.exit_json(changed=False, **normalized_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py 
b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py index 99106b03c..b19507a9c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_igw version_added: 1.0.0 @@ -15,10 +13,17 @@ description: - Manage an AWS VPC Internet gateway author: Robert Estelle (@erydo) options: + internet_gateway_id: + version_added: 7.0.0 + description: + - The ID of the Internet Gateway to manage. + required: false + type: str vpc_id: description: - - The VPC ID for the VPC in which to manage the Internet Gateway. - required: true + - The VPC ID for the VPC to attach (when I(state=present)). + - A VPC ID can also be provided to look up the internet gateway attached to that VPC. + required: false type: str state: description: @@ -26,16 +31,31 @@ options: default: present choices: [ 'present', 'absent' ] type: str + force_attach: + version_added: 7.0.0 + description: + - Force attaching VPC to I(vpc_id). + - Setting this option to true will detach an existing VPC attachment and attach to the supplied I(vpc_id). + - Ignored when I(state=absent). + - I(vpc_id) must be specified when I(force_attach) is true. + default: false + type: bool + detach_vpc: + version_added: 7.0.0 + description: + - Remove the attached VPC from the gateway. + default: false + type: bool notes: - Support for I(purge_tags) was added in release 1.3.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. @@ -51,18 +71,44 @@ EXAMPLES = ''' vpc_id: vpc-abcdefgh state: present tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: igw -- name: Delete Internet gateway +- name: Create a detached gateway + amazon.aws.ec2_vpc_igw: + state: present + register: igw + +- name: Change the VPC the gateway is attached to + amazon.aws.ec2_vpc_igw: + internet_gateway_id: igw-abcdefgh + vpc_id: vpc-stuvwxyz + force_attach: true + state: present + register: igw + +- name: Delete Internet gateway using the attached VPC ID + amazon.aws.ec2_vpc_igw: + state: absent + vpc_id: vpc-abcdefgh + register: vpc_igw_delete + +- name: Delete Internet gateway with gateway ID amazon.aws.ec2_vpc_igw: state: absent + internet_gateway_id: igw-abcdefgh + register: vpc_igw_delete + +- name: Delete Internet gateway ensuring attached VPC is correct + amazon.aws.ec2_vpc_igw: + state: absent + internet_gateway_id: igw-abcdefgh vpc_id: vpc-abcdefgh register: vpc_igw_delete -''' +""" -RETURN = ''' +RETURN = r""" changed: description: If any changes have been made to the Internet Gateway. 
type: bool @@ -88,63 +134,70 @@ vpc_id: returned: I(state=present) sample: vpc_id: "vpc-XXXXXXXX" -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff(retries=10, delay=10) def describe_igws_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_internet_gateways') - return paginator.paginate(**params).build_full_result()['InternetGateways'] + paginator = connection.get_paginator("describe_internet_gateways") + return paginator.paginate(**params).build_full_result()["InternetGateways"] + +def describe_vpcs_with_backoff(connection, **params): + paginator = connection.get_paginator("describe_vpcs") + return paginator.paginate(**params).build_full_result()["Vpcs"] -class AnsibleEc2Igw(): +class AnsibleEc2Igw: def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client( - 'ec2', retry_decorator=AWSRetry.jittered_backoff() - ) + self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) self._check_mode = self._module.check_mode def process(self): - vpc_id = self._module.params.get('vpc_id') - state = self._module.params.get('state', 'present') - tags = self._module.params.get('tags') - purge_tags = self._module.params.get('purge_tags') - - if state == 'present': - self.ensure_igw_present(vpc_id, tags, purge_tags) - elif state == 'absent': - self.ensure_igw_absent(vpc_id) + internet_gateway_id = self._module.params.get("internet_gateway_id") + vpc_id = self._module.params.get("vpc_id") + state = self._module.params.get("state", "present") + tags = self._module.params.get("tags") + purge_tags = self._module.params.get("purge_tags") + force_attach = self._module.params.get("force_attach") + detach_vpc = self._module.params.get("detach_vpc") + + if state == "present": + self.ensure_igw_present(internet_gateway_id, vpc_id, tags, purge_tags, force_attach, detach_vpc) + elif state == "absent": + self.ensure_igw_absent(internet_gateway_id, vpc_id) def get_matching_igw(self, vpc_id, gateway_id=None): - ''' + """ Returns the internet gateway found. 
Parameters: vpc_id (str): VPC ID gateway_id (str): Internet Gateway ID, if specified Returns: igw (dict): dict of igw found, None if none found - ''' - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + """ try: # If we know the gateway_id, use it to avoid bugs with using filters # See https://github.com/ansible-collections/amazon.aws/pull/766 if not gateway_id: + filters = ansible_dict_to_boto3_filter_list({"attachment.vpc-id": vpc_id}) igws = describe_igws_with_backoff(self._connection, Filters=filters) else: igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id]) @@ -153,88 +206,179 @@ class AnsibleEc2Igw(): igw = None if len(igws) > 1: - self._module.fail_json( - msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting' - .format(vpc_id)) + self._module.fail_json(msg=f"EC2 returned more than one Internet Gateway for VPC {vpc_id}, aborting") elif igws: igw = camel_dict_to_snake_dict(igws[0]) return igw + def get_matching_vpc(self, vpc_id): + """ + Returns the virtual private cloud found. + Parameters: + vpc_id (str): VPC ID + Returns: + vpc (dict): dict of vpc found, None if none found + """ + try: + vpcs = describe_vpcs_with_backoff(self._connection, VpcIds=[vpc_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # self._module.fail_json(msg=f"{str(e)}") + if "InvalidVpcID.NotFound" in str(e): + self._module.fail_json(msg=f"VPC with Id {vpc_id} not found, aborting") + self._module.fail_json_aws(e) + + vpc = None + if len(vpcs) > 1: + self._module.fail_json(msg=f"EC2 returned more than one VPC for {vpc_id}, aborting") + elif vpcs: + vpc = camel_dict_to_snake_dict(vpcs[0]) + + return vpc + @staticmethod def get_igw_info(igw, vpc_id): return { - 'gateway_id': igw['internet_gateway_id'], - 'tags': boto3_tag_list_to_ansible_dict(igw['tags']), - 'vpc_id': vpc_id + "gateway_id": igw["internet_gateway_id"], + "tags": boto3_tag_list_to_ansible_dict(igw["tags"]), + "vpc_id": vpc_id, } - def ensure_igw_absent(self, vpc_id): - igw = self.get_matching_igw(vpc_id) + def detach_vpc(self, igw_id, vpc_id): + try: + self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw_id, VpcId=vpc_id) + + self._results["changed"] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="Unable to detach VPC.") + + def attach_vpc(self, igw_id, vpc_id): + try: + self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw_id, VpcId=vpc_id) + + # Ensure the gateway is attached before proceeding + waiter = get_waiter(self._connection, "internet_gateway_attached") + waiter.wait(InternetGatewayIds=[igw_id]) + + self._results["changed"] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="Failed to attach VPC.") + + def ensure_igw_absent(self, igw_id, vpc_id): + igw = self.get_matching_igw(vpc_id, gateway_id=igw_id) if igw is None: return self._results + igw_vpc_id = "" + + if len(igw["attachments"]) > 0: + igw_vpc_id = igw["attachments"][0]["vpc_id"] + + if vpc_id and (igw_vpc_id != vpc_id): + self._module.fail_json(msg=f"Supplied VPC ({vpc_id}) does not match found VPC ({igw_vpc_id}), aborting") + if self._check_mode: - self._results['changed'] = True + self._results["changed"] = True return self._results try: - self._results['changed'] = True - self._connection.detach_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'], - VpcId=vpc_id - ) - 
self._connection.delete_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'] - ) + self._results["changed"] = True + + if igw_vpc_id: + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + + self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw["internet_gateway_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") return self._results - def ensure_igw_present(self, vpc_id, tags, purge_tags): - igw = self.get_matching_igw(vpc_id) + def ensure_igw_present(self, igw_id, vpc_id, tags, purge_tags, force_attach, detach_vpc): + igw = None + + if igw_id: + igw = self.get_matching_igw(None, gateway_id=igw_id) + elif vpc_id: + igw = self.get_matching_igw(vpc_id) if igw is None: if self._check_mode: - self._results['changed'] = True - self._results['gateway_id'] = None + self._results["changed"] = True + self._results["gateway_id"] = None return self._results + if vpc_id: + self.get_matching_vpc(vpc_id) + try: - response = self._connection.create_internet_gateway(aws_retry=True) + create_params = {} + if tags: + create_params["TagSpecifications"] = boto3_tag_specifications(tags, types="internet-gateway") + response = self._connection.create_internet_gateway(aws_retry=True, **create_params) # Ensure the gateway exists before trying to attach it or add tags - waiter = get_waiter(self._connection, 'internet_gateway_exists') - waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) - - igw = camel_dict_to_snake_dict(response['InternetGateway']) - self._connection.attach_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'], - VpcId=vpc_id - ) - - # Ensure the gateway is attached before proceeding - waiter = get_waiter(self._connection, 'internet_gateway_attached') - waiter.wait(InternetGatewayIds=[igw['internet_gateway_id']]) - self._results['changed'] = True + waiter = get_waiter(self._connection, "internet_gateway_exists") + waiter.wait(InternetGatewayIds=[response["InternetGateway"]["InternetGatewayId"]]) + self._results["changed"] = True + + igw = camel_dict_to_snake_dict(response["InternetGateway"]) + + if vpc_id: + self.attach_vpc(igw["internet_gateway_id"], vpc_id) except botocore.exceptions.WaiterError as e: self._module.fail_json_aws(e, msg="No Internet Gateway exists.") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') + self._module.fail_json_aws(e, msg="Unable to create Internet Gateway") + else: + igw_vpc_id = None + + if len(igw["attachments"]) > 0: + igw_vpc_id = igw["attachments"][0]["vpc_id"] + + if detach_vpc: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + return self._results + + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + + elif igw_vpc_id != vpc_id: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + return self._results + + if force_attach: + self.get_matching_vpc(vpc_id) + + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + self.attach_vpc(igw["internet_gateway_id"], vpc_id) + else: + self._module.fail_json(msg="VPC already attached, but does not match requested VPC.") + + elif vpc_id: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + 
return self._results + + self.get_matching_vpc(vpc_id) + self.attach_vpc(igw["internet_gateway_id"], vpc_id) # Modify tags - self._results['changed'] |= ensure_ec2_tags( - self._connection, self._module, igw['internet_gateway_id'], - resource_type='internet-gateway', tags=tags, purge_tags=purge_tags, - retry_codes='InvalidInternetGatewayID.NotFound' + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + igw["internet_gateway_id"], + resource_type="internet-gateway", + tags=tags, + purge_tags=purge_tags, + retry_codes="InvalidInternetGatewayID.NotFound", ) # Update igw - igw = self.get_matching_igw(vpc_id, gateway_id=igw['internet_gateway_id']) + igw = self.get_matching_igw(vpc_id, gateway_id=igw["internet_gateway_id"]) igw_info = self.get_igw_info(igw, vpc_id) self._results.update(igw_info) @@ -243,24 +387,36 @@ class AnsibleEc2Igw(): def main(): argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + internet_gateway_id=dict(), + vpc_id=dict(), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + force_attach=dict(default=False, type="bool"), + detach_vpc=dict(default=False, type="bool"), ) + required_if = [ + ("force_attach", True, ("vpc_id",), False), + ("state", "absent", ("internet_gateway_id", "vpc_id"), True), + ("detach_vpc", True, ("internet_gateway_id", "vpc_id"), True), + ] + + mutually_exclusive = [("force_attach", "detach_vpc")] + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, + required_if=required_if, + mutually_exclusive=mutually_exclusive, ) - results = dict( - changed=False - ) + + results = dict(changed=False) igw_manager = AnsibleEc2Igw(module=module, results=results) igw_manager.process() module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py index 5e7c1a0af..583719c04 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_igw_info version_added: 1.0.0 @@ -34,12 +32,12 @@ options: type: bool version_added: 1.3.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. 
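# Illustrative only (not part of the upstream examples): filtering on
# attachment.vpc-id, the same filter the ec2_vpc_igw module uses internally,
# scopes results to a single VPC; the VPC ID below is a placeholder.
- name: Gather information about the Internet Gateway attached to a specific VPC
  amazon.aws.ec2_vpc_igw_info:
    region: ap-southeast-2
    filters:
      attachment.vpc-id: vpc-0123456789abcdef0
  register: igw_info
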
- name: Gather information about all Internet Gateways for an account or profile @@ -53,7 +51,7 @@ EXAMPLES = r''' region: ap-southeast-2 profile: production filters: - "tag:Name": "igw-123" + "tag:Name": "igw-123" register: igw_info - name: Gather information about a specific internet gateway by InternetGatewayId @@ -62,9 +60,9 @@ EXAMPLES = r''' profile: production internet_gateway_ids: igw-c1231234 register: igw_info -''' +""" -RETURN = r''' +RETURN = r""" changed: description: True if listing the internet gateways succeeds. type: bool @@ -102,31 +100,34 @@ internet_gateways: sample: tags: "Ansible": "Test" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_internet_gateway_info(internet_gateway, convert_tags): if convert_tags: - tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags']) + tags = boto3_tag_list_to_ansible_dict(internet_gateway["Tags"]) ignore_list = ["Tags"] else: - tags = internet_gateway['Tags'] + tags = internet_gateway["Tags"] ignore_list = [] - internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'], - 'Attachments': internet_gateway['Attachments'], - 'Tags': tags} + internet_gateway_info = { + "InternetGatewayId": internet_gateway["InternetGatewayId"], + "Attachments": internet_gateway["Attachments"], + "Tags": tags, + } internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list) return internet_gateway_info @@ -135,37 +136,39 @@ def get_internet_gateway_info(internet_gateway, convert_tags): def list_internet_gateways(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - convert_tags = module.params.get('convert_tags') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + convert_tags = module.params.get("convert_tags") if module.params.get("internet_gateway_ids"): - params['InternetGatewayIds'] = module.params.get("internet_gateway_ids") + params["InternetGatewayIds"] = module.params.get("internet_gateway_ids") try: all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params) - except is_boto3_error_code('InvalidInternetGatewayID.NotFound'): - module.fail_json('InternetGateway not found') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - 
module.fail_json_aws(e, 'Unable to describe internet gateways') + except is_boto3_error_code("InvalidInternetGatewayID.NotFound"): + module.fail_json("InternetGateway not found") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Unable to describe internet gateways") - return [get_internet_gateway_info(igw, convert_tags) - for igw in all_internet_gateways['InternetGateways']] + return [get_internet_gateway_info(igw, convert_tags) for igw in all_internet_gateways["InternetGateways"]] def main(): argument_spec = dict( - filters=dict(type='dict', default=dict()), - internet_gateway_ids=dict(type='list', default=None, elements='str'), - convert_tags=dict(type='bool', default=True), + filters=dict(type="dict", default=dict()), + internet_gateway_ids=dict(type="list", default=None, elements="str"), + convert_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # call your function here results = list_internet_gateways(connection, module) @@ -173,5 +176,5 @@ def main(): module.exit_json(internet_gateways=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py index 38bdf34f5..2469789df 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_nat_gateway version_added: 1.0.0 @@ -77,6 +75,16 @@ options: When specifying this option, ensure you specify the eip_address parameter as well otherwise any subsequent runs will fail. type: str + default_create: + description: + - When I(default_create=True) and I(eip_address) has been set, but not yet + allocated, the NAT gateway is created and a new EIP is automatically allocated. + - When I(default_create=False) and I(eip_address) has been set, but not yet + allocated, the module will fail. + - If I(eip_address) has not been set, this parameter has no effect. + default: false + type: bool + version_added: 6.2.0 author: - Allen Sanabria (@linuxdynasty) - Jon Hadfield (@jonhadfield) @@ -85,13 +93,13 @@ author: notes: - Support for I(tags) and I(purge_tags) was added in release 1.4.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create new nat gateway with client token. 
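
# A minimal sketch of the default_create behaviour documented above
# (illustrative only; the EIP address and subnet ID are placeholders):
# when the given eip_address is not yet allocated, a fresh EIP is
# allocated instead of failing the task.
- name: Create new nat gateway, allocating a new EIP when the address is unknown
  amazon.aws.ec2_vpc_nat_gateway:
    state: present
    subnet_id: subnet-12345678
    eip_address: 52.1.1.1
    default_create: true
    wait: true
  register: new_nat_gateway
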
@@ -172,8 +180,8 @@ EXAMPLES = r''' allocation_id: eipalloc-12345678 region: ap-southeast-2 tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: new_nat_gateway - name: Update tags without purge @@ -183,12 +191,12 @@ EXAMPLES = r''' region: ap-southeast-2 purge_tags: false tags: - Tag3: tag3 + Tag3: tag3 wait: true register: update_tags_nat_gateway -''' +""" -RETURN = r''' +RETURN = r""" create_time: description: The ISO 8601 date time format in UTC. returned: In all cases. @@ -233,7 +241,7 @@ nat_gateway_addresses: 'allocation_id': 'eipalloc-12345' } ] -''' +""" import datetime @@ -242,33 +250,34 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff(retries=10) def _describe_nat_gateways(client, **params): try: - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] - except is_boto3_error_code('InvalidNatGatewayID.NotFound'): + paginator = client.get_paginator("describe_nat_gateways") + return paginator.paginate(**params).build_full_result()["NatGateways"] + except is_boto3_error_code("InvalidNatGatewayID.NotFound"): return None def wait_for_status(client, module, waiter_name, nat_gateway_id): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") try: waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) waiter.wait( NatGatewayIds=[nat_gateway_id], - WaiterConfig={'MaxAttempts': attempts} + WaiterConfig={"MaxAttempts": attempts}, ) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.") @@ -321,19 +330,13 @@ def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states existing_gateways = list() if not states: - states = ['available', 'pending'] + states = ["available", "pending"] if nat_gateway_id: - params['NatGatewayIds'] = [nat_gateway_id] + params["NatGatewayIds"] = [nat_gateway_id] else: - params['Filter'] = [ - { - 'Name': 'subnet-id', - 'Values': [subnet_id] - }, - { - 'Name': 'state', - 'Values': states - } + params["Filter"] = [ + {"Name": "subnet-id", "Values": [subnet_id]}, + {"Name": "state", "Values": states}, ] try: @@ -393,15 +396,15 @@ def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None): allocation_id_exists = False gateways = [] - 
states = ['available', 'pending'] + states = ["available", "pending"] - gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states)) + gws_retrieved = get_nat_gateways(client, module, subnet_id, states=states) if gws_retrieved: for gw in gws_retrieved: - for address in gw['nat_gateway_addresses']: + for address in gw["nat_gateway_addresses"]: if allocation_id: - if address.get('allocation_id') == allocation_id: + if address.get("allocation_id") == allocation_id: allocation_id_exists = True gateways.append(gw) else: @@ -431,13 +434,13 @@ def get_eip_allocation_id_by_address(client, module, eip_address): """ params = { - 'PublicIps': [eip_address], + "PublicIps": [eip_address], } allocation_id = None - msg = '' + msg = "" try: - allocations = client.describe_addresses(aws_retry=True, **params)['Addresses'] + allocations = client.describe_addresses(aws_retry=True, **params)["Addresses"] if len(allocations) == 1: allocation = allocations[0] @@ -445,22 +448,20 @@ def get_eip_allocation_id_by_address(client, module, eip_address): allocation = None if allocation: - if allocation.get('Domain') != 'vpc': - msg = ( - "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP" - .format(eip_address) - ) + if allocation.get("Domain") != "vpc": + msg = f"EIP {eip_address} is a non-VPC EIP, please allocate a VPC scoped EIP" else: - allocation_id = allocation.get('AllocationId') + allocation_id = allocation.get("AllocationId") - except is_boto3_error_code('InvalidAddress.Malformed'): - module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address)) - except is_boto3_error_code('InvalidAddress.NotFound'): # pylint: disable=duplicate-except - msg = ( - "EIP {0} does not exist".format(eip_address) - ) + except is_boto3_error_code("InvalidAddress.Malformed"): + module.fail_json(msg=f"EIP address {eip_address} is invalid.") + except is_boto3_error_code("InvalidAddress.NotFound"): # pylint: disable=duplicate-except + msg = f"EIP {eip_address} does not exist" allocation_id = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to describe EIP") return allocation_id, msg @@ -485,9 +486,9 @@ def allocate_eip_address(client, module): """ new_eip = None - msg = '' + msg = "" params = { - 'Domain': 'vpc', + "Domain": "vpc", } if module.check_mode: @@ -496,9 +497,9 @@ def allocate_eip_address(client, module): return ip_allocated, msg, new_eip try: - new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] + new_eip = client.allocate_address(aws_retry=True, **params)["AllocationId"] ip_allocated = True - msg = 'eipalloc id {0} created'.format(new_eip) + msg = f"eipalloc id {new_eip} created" except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -525,21 +526,24 @@ def release_address(client, module, allocation_id): Tuple (bool, str) """ - msg = '' + msg = "" if module.check_mode: - return True, '' + return True, "" ip_released = False try: client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id]) - except is_boto3_error_code('InvalidAllocationID.NotFound') as e: + except is_boto3_error_code("InvalidAllocationID.NotFound") as e: # IP address likely already released # Happens with gateway in 'deleted' state that # still lists associations return True, e - except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e)

     try:
@@ -551,8 +555,7 @@ def release_address(client, module, allocation_id):
     return ip_released, msg


-def create(client, module, subnet_id, allocation_id, tags, client_token=None,
-           wait=False, connectivity_type='public'):
+def create(client, module, subnet_id, allocation_id, tags, client_token=None, wait=False, connectivity_type="public"):
     """Create an Amazon NAT Gateway.
     Args:
         client (botocore.client.EC2): Boto3 client
@@ -602,67 +605,74 @@ def create(client, module, subnet_id, allocation_id, tags, client_token=None,
         Tuple (bool, str, list)
     """
-    params = {
-        'SubnetId': subnet_id,
-        'ConnectivityType': connectivity_type
-    }
+    params = {"SubnetId": subnet_id, "ConnectivityType": connectivity_type}

     if connectivity_type == "public":
-        params.update({'AllocationId': allocation_id})
+        params.update({"AllocationId": allocation_id})

     request_time = datetime.datetime.utcnow()
     changed = False
     token_provided = False
     result = {}
-    msg = ''
+    msg = ""

     if client_token:
         token_provided = True
-        params['ClientToken'] = client_token
+        params["ClientToken"] = client_token

     if tags:
-        params["TagSpecifications"] = boto3_tag_specifications(tags, ['natgateway'])
+        params["TagSpecifications"] = boto3_tag_specifications(tags, ["natgateway"])

     if module.check_mode:
         changed = True
         return changed, result, msg

     try:
-        result = camel_dict_to_snake_dict(
-            client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]
-        )
+        result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"])
         changed = True

-        create_time = result['create_time'].replace(tzinfo=None)
+        create_time = result["create_time"].replace(tzinfo=None)

         if token_provided and (request_time > create_time):
             changed = False

-        elif wait and result.get('state') != 'available':
-            wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id'])
+        elif wait and result.get("state") != "available":
+            wait_for_status(client, module, "nat_gateway_available", result["nat_gateway_id"])

             # Get new result
             result = camel_dict_to_snake_dict(
-                _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0]
+                _describe_nat_gateways(client, NatGatewayIds=[result["nat_gateway_id"]])[0]
             )

-    except is_boto3_error_code('IdempotentParameterMismatch') as e:
-        msg = (
-            'NAT Gateway does not support update and token has already been provided:' + e
-        )
+    except is_boto3_error_code("IdempotentParameterMismatch") as e:
+        msg = "NAT Gateway does not support update and token has already been provided: " + str(e)
         changed = False
         result = None
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e)

-    result['tags'] = describe_ec2_tags(client, module, result['nat_gateway_id'],
-                                       resource_type='natgateway')
+    result["tags"] = describe_ec2_tags(client, module, result["nat_gateway_id"], resource_type="natgateway")

     return changed, result, msg


-def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None,
-               if_exist_do_not_create=False, wait=False, client_token=None, connectivity_type='public'):
+def pre_create(
+    client,
module, + subnet_id, + tags, + purge_tags, + allocation_id=None, + eip_address=None, + if_exist_do_not_create=False, + wait=False, + client_token=None, + connectivity_type="public", + default_create=False, +): """Create an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client @@ -683,6 +693,8 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, default = False client_token (str): default = None + default_create (bool): create a NAT gateway even if EIP address is not found. + default = False Basic Usage: >>> client = boto3.client('ec2') @@ -717,78 +729,71 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, """ changed = False - msg = '' + msg = "" results = {} if not allocation_id and not eip_address: - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists(client, module, subnet_id) - ) + existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id) if len(existing_gateways) > 0 and if_exist_do_not_create: results = existing_gateways[0] - changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway', tags=tags, - purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, results["nat_gateway_id"], resource_type="natgateway", tags=tags, purge_tags=purge_tags + ) - results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway') + results["tags"] = describe_ec2_tags(client, module, results["nat_gateway_id"], resource_type="natgateway") if changed: return changed, msg, results changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results else: - changed, msg, allocation_id = ( - allocate_eip_address(client, module) - ) + if connectivity_type == "public": + changed, msg, allocation_id = allocate_eip_address(client, module) - if not changed: - return changed, msg, dict() + if not changed: + return changed, msg, dict() elif eip_address or allocation_id: if eip_address and not allocation_id: - allocation_id, msg = ( - get_eip_allocation_id_by_address( - client, module, eip_address - ) - ) - if not allocation_id: + allocation_id, msg = get_eip_allocation_id_by_address(client, module, eip_address) + if not allocation_id and not default_create: changed = False - return changed, msg, dict() + module.fail_json(msg=msg) + elif not allocation_id and default_create: + eip_address = None + return pre_create( + client, + module, + subnet_id, + tags, + purge_tags, + allocation_id, + eip_address, + if_exist_do_not_create, + wait, + client_token, + connectivity_type, + default_create, + ) - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists( - client, module, subnet_id, allocation_id - ) - ) + existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id, allocation_id) if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): results = existing_gateways[0] - changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway', tags=tags, - purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, results["nat_gateway_id"], resource_type="natgateway", tags=tags, purge_tags=purge_tags + ) - results['tags'] = describe_ec2_tags(client, module, 
results['nat_gateway_id'], - resource_type='natgateway') + results["tags"] = describe_ec2_tags(client, module, results["nat_gateway_id"], resource_type="natgateway") if changed: return changed, msg, results changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results changed, results, msg = create( @@ -798,7 +803,7 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, return changed, msg, results -def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connectivity_type='public'): +def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connectivity_type="public"): """Delete an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client @@ -842,126 +847,111 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec """ allocation_id = None - params = { - 'NatGatewayId': nat_gateway_id - } + params = {"NatGatewayId": nat_gateway_id} changed = False results = {} - states = ['pending', 'available'] - msg = '' + states = ["pending", "available"] + msg = "" if module.check_mode: changed = True return changed, msg, results try: - gw_list = ( - get_nat_gateways( - client, module, nat_gateway_id=nat_gateway_id, - states=states - ) - ) + gw_list = get_nat_gateways(client, module, nat_gateway_id=nat_gateway_id, states=states) if len(gw_list) == 1: results = gw_list[0] client.delete_nat_gateway(aws_retry=True, **params) if connectivity_type == "public": - allocation_id = ( - results['nat_gateway_addresses'][0]['allocation_id'] - ) + allocation_id = results["nat_gateway_addresses"][0]["allocation_id"] changed = True - msg = ( - 'NAT gateway {0} is in a deleting state. Delete was successful' - .format(nat_gateway_id) - ) + msg = f"NAT gateway {nat_gateway_id} is in a deleting state. 
Delete was successful" - if wait and results.get('state') != 'deleted': - wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id) + if wait and results.get("state") != "deleted": + wait_for_status(client, module, "nat_gateway_deleted", nat_gateway_id) # Get new results - results = camel_dict_to_snake_dict( - _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0] - ) - results['tags'] = describe_ec2_tags(client, module, nat_gateway_id, - resource_type='natgateway') + results = camel_dict_to_snake_dict(_describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0]) + results["tags"] = describe_ec2_tags(client, module, nat_gateway_id, resource_type="natgateway") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) if release_eip and allocation_id: - eip_released, msg = ( - release_address(client, module, allocation_id)) + eip_released, msg = release_address(client, module, allocation_id) if not eip_released: - module.fail_json( - msg="Failed to release EIP {0}: {1}".format(allocation_id, msg) - ) + module.fail_json(msg=f"Failed to release EIP {allocation_id}: {msg}") return changed, msg, results def main(): argument_spec = dict( - subnet_id=dict(type='str'), - eip_address=dict(type='str'), - allocation_id=dict(type='str'), - connectivity_type=dict(type='str', default='public', choices=['private', 'public']), - if_exist_do_not_create=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - release_eip=dict(type='bool', default=False), - nat_gateway_id=dict(type='str'), - client_token=dict(type='str', no_log=False), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + subnet_id=dict(type="str"), + eip_address=dict(type="str"), + allocation_id=dict(type="str"), + connectivity_type=dict(type="str", default="public", choices=["private", "public"]), + if_exist_do_not_create=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=320, required=False), + release_eip=dict(type="bool", default=False), + nat_gateway_id=dict(type="str"), + client_token=dict(type="str", no_log=False), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + default_create=dict(type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[ - ['allocation_id', 'eip_address'] - ], - required_if=[['state', 'absent', ['nat_gateway_id']], - ['state', 'present', ['subnet_id']]], + mutually_exclusive=[["allocation_id", "eip_address"]], + required_if=[["state", "absent", ["nat_gateway_id"]], ["state", "present", ["subnet_id"]]], ) - state = module.params.get('state').lower() - subnet_id = module.params.get('subnet_id') - allocation_id = module.params.get('allocation_id') - connectivity_type = module.params.get('connectivity_type') - eip_address = module.params.get('eip_address') - nat_gateway_id = module.params.get('nat_gateway_id') - wait = module.params.get('wait') - release_eip = module.params.get('release_eip') - client_token = module.params.get('client_token') - if_exist_do_not_create = module.params.get('if_exist_do_not_create') - tags = module.params.get('tags') - purge_tags = 
module.params.get('purge_tags')
+    state = module.params.get("state").lower()
+    subnet_id = module.params.get("subnet_id")
+    allocation_id = module.params.get("allocation_id")
+    connectivity_type = module.params.get("connectivity_type")
+    eip_address = module.params.get("eip_address")
+    nat_gateway_id = module.params.get("nat_gateway_id")
+    wait = module.params.get("wait")
+    release_eip = module.params.get("release_eip")
+    client_token = module.params.get("client_token")
+    if_exist_do_not_create = module.params.get("if_exist_do_not_create")
+    tags = module.params.get("tags")
+    purge_tags = module.params.get("purge_tags")
+    default_create = module.params.get("default_create")

     try:
-        client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+        client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS.')
+        module.fail_json_aws(e, msg="Failed to connect to AWS.")

     changed = False
-    msg = ''
-
-    if state == 'present':
-        changed, msg, results = (
-            pre_create(
-                client, module, subnet_id, tags, purge_tags, allocation_id, eip_address,
-                if_exist_do_not_create, wait, client_token, connectivity_type
-            )
+    msg = ""
+
+    if state == "present":
+        changed, msg, results = pre_create(
+            client,
+            module,
+            subnet_id,
+            tags,
+            purge_tags,
+            allocation_id,
+            eip_address,
+            if_exist_do_not_create,
+            wait,
+            client_token,
+            connectivity_type,
+            default_create,
         )
     else:
-        changed, msg, results = (
-            remove(
-                client, module, nat_gateway_id, wait, release_eip, connectivity_type
-            )
-        )
+        changed, msg, results = remove(client, module, nat_gateway_id, wait, release_eip, connectivity_type)

     module.exit_json(msg=msg, changed=changed, **results)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
index 45c794e80..a8c76142a 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 module: ec2_vpc_nat_gateway_info
 short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods
 version_added: 1.0.0
@@ -28,12 +26,12 @@ options:
     default: {}
 author: Karen Cheng (@Etherdaemon)
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Simple example of listing all nat gateways
 - name: List all managed nat gateways in ap-southeast-2
   amazon.aws.ec2_vpc_nat_gateway_info:
@@ -66,9 +64,9 @@ EXAMPLES = r'''
     subnet-id: subnet-12345678
     state: ['available']
   register: existing_nat_gateways
-'''
+"""

-RETURN = r'''
+RETURN = r"""
changed:
  description: True if listing the NAT gateways succeeds.
type: bool
@@ -143,7 +141,7 @@ result:
       sample:
         Tag1: tag1
         Tag_2: tag_2
-'''
+"""


try:
    import botocore
except ImportError:
    pass  # Handled by AnsibleAWSModule

-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list


 @AWSRetry.jittered_backoff(retries=10)
 def _describe_nat_gateways(client, module, **params):
     try:
-        paginator = client.get_paginator('describe_nat_gateways')
-        return paginator.paginate(**params).build_full_result()['NatGateways']
-    except is_boto3_error_code('InvalidNatGatewayID.NotFound'):
+        paginator = client.get_paginator("describe_nat_gateways")
+        return paginator.paginate(**params).build_full_result()["NatGateways"]
+    except is_boto3_error_code("InvalidNatGatewayID.NotFound"):
         module.exit_json(msg="NAT gateway not found.")
-    except is_boto3_error_code('NatGatewayMalformed'):  # pylint: disable=duplicate-except
-        module.fail_json_aws(msg="NAT gateway id is malformed.")
+    except is_boto3_error_code("NatGatewayMalformed") as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="NAT gateway id is malformed.")
@@ -175,20 +174,20 @@ def get_nat_gateways(client, module):
     params = dict()
     nat_gateways = list()

-    params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
-    params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+    params["Filter"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+    params["NatGatewayIds"] = module.params.get("nat_gateway_ids")

     try:
         result = normalize_boto3_result(_describe_nat_gateways(client, module, **params))
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, 'Unable to describe NAT gateways.')
+        module.fail_json_aws(e, "Unable to describe NAT gateways.")

     for gateway in result:
         # Turn the boto3 result into ansible_friendly_snaked_names
         converted_gateway = camel_dict_to_snake_dict(gateway)
-        if 'tags' in converted_gateway:
+        if "tags" in converted_gateway:
             # Turn the boto3 result into ansible friendly tag dictionary
-            converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+            converted_gateway["tags"] = boto3_tag_list_to_ansible_dict(converted_gateway["tags"])
         nat_gateways.append(converted_gateway)

     return nat_gateways
@@ -196,22 +195,24 @@ def get_nat_gateways(client, module):

 def main():
     argument_spec = dict(
-
filters=dict(default={}, type='dict'), - nat_gateway_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type="dict"), + nat_gateway_ids=dict(default=[], type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") results = get_nat_gateways(connection, module) module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py index c7430e989..9e2862013 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_net version_added: 1.0.0 @@ -86,13 +84,13 @@ options: type: bool default: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
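# Illustrative sketch (values are placeholders): cidr_block accepts a list,
# and purge_cidrs removes any CIDRs associated with the VPC that are not in
# that list.
- name: create a VPC with a secondary CIDR block, purging any others
  amazon.aws.ec2_vpc_net:
    name: Module_dev2
    cidr_block:
      - 10.10.0.0/16
      - 10.20.0.0/16
    purge_cidrs: true
    region: us-east-1
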
- name: create a VPC with dedicated tenancy and a couple of tags @@ -109,12 +107,17 @@ EXAMPLES = ''' amazon.aws.ec2_vpc_net: name: Module_dev2 cidr_block: 10.10.0.0/16 - ipv6_cidr: True + ipv6_cidr: true region: us-east-1 tenancy: dedicated -''' -RETURN = ''' +- name: Delete an existing VPC + amazon.aws.ec2_vpc_net: + vpc_id: vpc-0123456789abcdef0 + state: absent +""" + +RETURN = r""" vpc: description: info about the VPC that was created or deleted returned: always @@ -139,11 +142,6 @@ vpc: } } ] - classic_link_enabled: - description: indicates whether ClassicLink is enabled - returned: always - type: bool - sample: false dhcp_options_id: description: the id of the DHCP options associated with this VPC returned: always @@ -204,7 +202,7 @@ vpc: returned: always type: str sample: 123456789012 -''' +""" from time import sleep from time import time @@ -214,17 +212,16 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.common.network import to_subnet from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.network import to_subnet -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -234,40 +231,33 @@ def vpc_exists(module, vpc, name, cidr_block, multi): otherwise it will assume the VPC does not exist and thus return None. 
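
    Illustrative call (sketch only; the name and CIDR are placeholders):

        vpc_id = vpc_exists(module, connection, name="Module_dev2",
                            cidr_block=["10.10.0.0/16"], multi=False)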
""" try: - vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block}) - matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + vpc_filters = ansible_dict_to_boto3_filter_list({"tag:Name": name, "cidr-block": cidr_block}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)["Vpcs"] # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block) if not matching_vpcs: - vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]}) - matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + vpc_filters = ansible_dict_to_boto3_filter_list({"tag:Name": name, "cidr-block": [cidr_block[0]]}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)["Vpcs"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe VPCs") if multi: return None elif len(matching_vpcs) == 1: - return matching_vpcs[0]['VpcId'] + return matching_vpcs[0]["VpcId"] elif len(matching_vpcs) > 1: - module.fail_json(msg='Currently there are %d VPCs that have the same name and ' - 'CIDR block you specified. If you would like to create ' - 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs)) + module.fail_json( + msg=( + f"Currently there are {len(matching_vpcs)} VPCs that have the same name and CIDR block you specified." + " If you would like to create the VPC anyway please pass True to the multi_ok param." + ) + ) return None -def get_classic_link_status(module, connection, vpc_id): - try: - results = connection.describe_vpc_classic_link(aws_retry=True, VpcIds=[vpc_id]) - return results['Vpcs'][0].get('ClassicLinkEnabled') - except is_boto3_error_message('The functionality you requested is not available in this region.'): - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe VPCs") - - def wait_for_vpc_to_exist(module, connection, **params): # wait for vpc to be available try: - get_waiter(connection, 'vpc_exists').wait(**params) + get_waiter(connection, "vpc_exists").wait(**params) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="VPC failed to reach expected state (exists)") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -277,7 +267,7 @@ def wait_for_vpc_to_exist(module, connection, **params): def wait_for_vpc(module, connection, **params): # wait for vpc to be available try: - get_waiter(connection, 'vpc_available').wait(**params) + get_waiter(connection, "vpc_available").wait(**params) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="VPC failed to reach expected state (available)") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -287,12 +277,10 @@ def wait_for_vpc(module, connection, **params): def get_vpc(module, connection, vpc_id, wait=True): wait_for_vpc(module, connection, VpcIds=[vpc_id]) try: - vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0] + vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)["Vpcs"][0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe VPCs") - vpc_obj['ClassicLinkEnabled'] = get_classic_link_status(module, 
connection, vpc_id) - return vpc_obj @@ -304,7 +292,7 @@ def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags): if purge_tags and tags is None: purge_tags = False tags = tags or {} - tags.update({'Name': name}) + tags.update({"Name": name}) if tags is None: return False @@ -319,15 +307,15 @@ def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags): def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): if dhcp_id is None: return False - if vpc_obj['DhcpOptionsId'] == dhcp_id: + if vpc_obj["DhcpOptionsId"] == dhcp_id: return False if module.check_mode: return True try: - connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True) + connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id)) + module.fail_json_aws(e, msg=f"Failed to associate DhcpOptionsId {dhcp_id}") return True @@ -337,18 +325,19 @@ def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name): module.exit_json(changed=True, msg="VPC would be created if not in check mode") create_args = dict( - CidrBlock=cidr_block, InstanceTenancy=tenancy, + CidrBlock=cidr_block, + InstanceTenancy=tenancy, ) if name: tags = tags or {} - tags['Name'] = name + tags["Name"] = name if tags: - create_args['TagSpecifications'] = boto3_tag_specifications(tags, 'vpc') + create_args["TagSpecifications"] = boto3_tag_specifications(tags, "vpc") # Defaults to False (including None) if ipv6_cidr: - create_args['AmazonProvidedIpv6CidrBlock'] = True + create_args["AmazonProvidedIpv6CidrBlock"] = True try: vpc_obj = connection.create_vpc(aws_retry=True, **create_args) @@ -357,18 +346,20 @@ def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name): # wait up to 30 seconds for vpc to exist wait_for_vpc_to_exist( - module, connection, - VpcIds=[vpc_obj['Vpc']['VpcId']], - WaiterConfig=dict(MaxAttempts=30) + module, + connection, + VpcIds=[vpc_obj["Vpc"]["VpcId"]], + WaiterConfig=dict(MaxAttempts=30), ) # Wait for the VPC to enter an 'Available' State wait_for_vpc( - module, connection, - VpcIds=[vpc_obj['Vpc']['VpcId']], - WaiterConfig=dict(MaxAttempts=30) + module, + connection, + VpcIds=[vpc_obj["Vpc"]["VpcId"]], + WaiterConfig=dict(MaxAttempts=30), ) - return vpc_obj['Vpc']['VpcId'] + return vpc_obj["Vpc"]["VpcId"] def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value): @@ -380,18 +371,16 @@ def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value start_time = time() updated = False while time() < start_time + 300: - current_value = connection.describe_vpc_attribute( - Attribute=attribute, - VpcId=vpc_id, - aws_retry=True - )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value'] + current_value = connection.describe_vpc_attribute(Attribute=attribute, VpcId=vpc_id, aws_retry=True)[ + f"{attribute[0].upper()}{attribute[1:]}" + ]["Value"] if current_value != expected_value: sleep(3) else: updated = True break if not updated: - module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute)) + module.fail_json(msg=f"Failed to wait for {attribute} to be updated") def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): @@ -410,22 +399,31 @@ def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): while time() < start_time + 300: 
current_value = get_vpc(module, connection, vpc_id) if current_value: - ipv6_set = current_value.get('Ipv6CidrBlockAssociationSet') + ipv6_set = current_value.get("Ipv6CidrBlockAssociationSet") if ipv6_set: if ipv6_assoc_state: # At least one 'Amazon' IPv6 CIDR block must be associated. for val in ipv6_set: - if val.get('Ipv6Pool') == 'Amazon' and val.get("Ipv6CidrBlockState").get("State") == "associated": + if ( + val.get("Ipv6Pool") == "Amazon" + and val.get("Ipv6CidrBlockState").get("State") == "associated" + ): criteria_match = True break if criteria_match: break else: # All 'Amazon' IPv6 CIDR blocks must be disassociated. - expected_count = sum( - [(val.get("Ipv6Pool") == "Amazon") for val in ipv6_set]) - actual_count = sum([(val.get('Ipv6Pool') == 'Amazon' and - val.get("Ipv6CidrBlockState").get("State") == "disassociated") for val in ipv6_set]) + expected_count = sum([(val.get("Ipv6Pool") == "Amazon") for val in ipv6_set]) + actual_count = sum( + [ + ( + val.get("Ipv6Pool") == "Amazon" + and val.get("Ipv6CidrBlockState").get("State") == "disassociated" + ) + for val in ipv6_set + ] + ) if actual_count == expected_count: criteria_match = True break @@ -440,14 +438,16 @@ def get_cidr_network_bits(module, cidr_block): fixed_cidrs = [] for cidr in cidr_block: - split_addr = cidr.split('/') + split_addr = cidr.split("/") if len(split_addr) == 2: # this_ip is a IPv4 CIDR that may or may not have host bits set # Get the network bits. valid_cidr = to_subnet(split_addr[0], split_addr[1]) if cidr != valid_cidr: - module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr)) + module.warn( + f"One of your CIDR addresses ({cidr}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {valid_cidr}." 
+ ) fixed_cidrs.append(valid_cidr) else: # let AWS handle invalid CIDRs @@ -461,9 +461,12 @@ def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): # Fetch current state from vpc_object current_ipv6_cidr = False - if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys(): - for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: - if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + if "Ipv6CidrBlockAssociationSet" in vpc_obj.keys(): + for ipv6_assoc in vpc_obj["Ipv6CidrBlockAssociationSet"]: + if ipv6_assoc["Ipv6Pool"] == "Amazon" and ipv6_assoc["Ipv6CidrBlockState"]["State"] in [ + "associated", + "associating", + ]: current_ipv6_cidr = True break @@ -480,12 +483,15 @@ def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Unable to associate IPv6 CIDR") else: - for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: - if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + for ipv6_assoc in vpc_obj["Ipv6CidrBlockAssociationSet"]: + if ipv6_assoc["Ipv6Pool"] == "Amazon" and ipv6_assoc["Ipv6CidrBlockState"]["State"] in [ + "associated", + "associating", + ]: try: - connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc['AssociationId'], aws_retry=True) + connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc["AssociationId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to disassociate IPv6 CIDR {0}.".format(ipv6_assoc['AssociationId'])) + module.fail_json_aws(e, f"Unable to disassociate IPv6 CIDR {ipv6_assoc['AssociationId']}.") return True @@ -493,8 +499,11 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): if cidr_block is None: return False, None - associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', []) - if cidr['CidrBlockState']['State'] not in ['disassociating', 'disassociated']) + associated_cidrs = dict( + (cidr["CidrBlock"], cidr["AssociationId"]) + for cidr in vpc_obj.get("CidrBlockAssociationSet", []) + if cidr["CidrBlockState"]["State"] not in ["disassociating", "disassociated"] + ) current_cidrs = set(associated_cidrs.keys()) desired_cidrs = set(cidr_block) @@ -514,15 +523,20 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): try: connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr)) + module.fail_json_aws(e, f"Unable to associate CIDR {cidr}.") for cidr in cidrs_to_remove: association_id = associated_cidrs[cidr] try: connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that " - "are associated with the CIDR block before you can disassociate it.".format(association_id)) + module.fail_json_aws( + e, + ( + f"Unable to disassociate {association_id}. You must detach or delete all gateways and resources" + " that are associated with the CIDR block before you can disassociate it." 
+ ), + ) return True, list(desired_cidrs) @@ -530,7 +544,9 @@ def update_dns_enabled(connection, module, vpc_id, dns_support): if dns_support is None: return False - current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value'] + current_dns_enabled = connection.describe_vpc_attribute(Attribute="enableDnsSupport", VpcId=vpc_id, aws_retry=True)[ + "EnableDnsSupport" + ]["Value"] if current_dns_enabled == dns_support: return False @@ -538,7 +554,7 @@ def update_dns_enabled(connection, module, vpc_id, dns_support): return True try: - connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True) + connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={"Value": dns_support}, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Failed to update enabled dns support attribute") return True @@ -548,7 +564,9 @@ def update_dns_hostnames(connection, module, vpc_id, dns_hostnames): if dns_hostnames is None: return False - current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value'] + current_dns_hostnames = connection.describe_vpc_attribute( + Attribute="enableDnsHostnames", VpcId=vpc_id, aws_retry=True + )["EnableDnsHostnames"]["Value"] if current_dns_hostnames == dns_hostnames: return False @@ -556,7 +574,7 @@ def update_dns_hostnames(connection, module, vpc_id, dns_hostnames): return True try: - connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True) + connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={"Value": dns_hostnames}, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute") return True @@ -572,37 +590,40 @@ def delete_vpc(connection, module, vpc_id): connection.delete_vpc(VpcId=vpc_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " - "and/or ec2_vpc_route_table modules to ensure that all depenednt components are absent.".format(vpc_id) + e, + msg=( + f"Failed to delete VPC {vpc_id} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, and/or" + " ec2_vpc_route_table modules to ensure that all depenednt components are absent." 
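The reconciliation in update_cidrs() above is plain set arithmetic. A sketch with hypothetical CIDRs and association IDs; the cidrs_to_add / cidrs_to_remove lines are unchanged context not shown in the hunk, reconstructed here from the two sets the diff does show:

associated_cidrs = {  # CidrBlock -> AssociationId, as built above
    "10.0.0.0/16": "vpc-cidr-assoc-0aaa",
    "10.1.0.0/16": "vpc-cidr-assoc-0bbb",
}
current_cidrs = set(associated_cidrs.keys())
desired_cidrs = {"10.0.0.0/16", "10.2.0.0/16"}
purge_cidrs = True

cidrs_to_add = list(desired_cidrs - current_cidrs)  # ["10.2.0.0/16"]
cidrs_to_remove = list(current_cidrs - desired_cidrs) if purge_cidrs else []
print(cidrs_to_add, [associated_cidrs[c] for c in cidrs_to_remove])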
+ ), ) return True def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_support, dns_hostnames, tags, dhcp_id): - if module.check_mode: return if expected_cidrs: wait_for_vpc( - module, connection, + module, + connection, VpcIds=[vpc_id], - Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}] + Filters=[{"Name": "cidr-block-association.cidr-block", "Values": expected_cidrs}], ) wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_cidr) if tags is not None: tag_list = ansible_dict_to_boto3_tag_list(tags) - filters = [{'Name': 'tag:{0}'.format(t['Key']), 'Values': [t['Value']]} for t in tag_list] + filters = [{"Name": f"tag:{t['Key']}", "Values": [t["Value"]]} for t in tag_list] wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters) - wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support) - wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames) + wait_for_vpc_attribute(connection, module, vpc_id, "enableDnsSupport", dns_support) + wait_for_vpc_attribute(connection, module, vpc_id, "enableDnsHostnames", dns_hostnames) if dhcp_id is not None: # Wait for DhcpOptionsId to be updated - filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}] + filters = [{"Name": "dhcp-options-id", "Values": [dhcp_id]}] wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters) return @@ -611,72 +632,69 @@ def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_ def main(): argument_spec = dict( name=dict(required=False), - vpc_id=dict(type='str', required=False, default=None), - cidr_block=dict(type='list', elements='str'), - ipv6_cidr=dict(type='bool', default=None), - tenancy=dict(choices=['default', 'dedicated'], default='default'), - dns_support=dict(type='bool'), - dns_hostnames=dict(type='bool'), + vpc_id=dict(type="str", required=False, default=None), + cidr_block=dict(type="list", elements="str"), + ipv6_cidr=dict(type="bool", default=None), + tenancy=dict(choices=["default", "dedicated"], default="default"), + dns_support=dict(type="bool"), + dns_hostnames=dict(type="bool"), dhcp_opts_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['present', 'absent'], default='present'), - multi_ok=dict(type='bool', default=False), - purge_cidrs=dict(type='bool', default=False), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["present", "absent"], default="present"), + multi_ok=dict(type="bool", default=False), + purge_cidrs=dict(type="bool", default=False), ) required_one_of = [ - ['vpc_id', 'name'], - ['vpc_id', 'cidr_block'], + ["vpc_id", "name"], + ["vpc_id", "cidr_block"], ] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=required_one_of, - supports_check_mode=True - ) - - name = module.params.get('name') - vpc_id = module.params.get('vpc_id') - cidr_block = module.params.get('cidr_block') - ipv6_cidr = module.params.get('ipv6_cidr') - purge_cidrs = module.params.get('purge_cidrs') - tenancy = module.params.get('tenancy') - dns_support = module.params.get('dns_support') - dns_hostnames = module.params.get('dns_hostnames') - dhcp_id = module.params.get('dhcp_opts_id') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - state = module.params.get('state') - multi = module.params.get('multi_ok') + module = 
AnsibleAWSModule(argument_spec=argument_spec, required_one_of=required_one_of, supports_check_mode=True) + + name = module.params.get("name") + vpc_id = module.params.get("vpc_id") + cidr_block = module.params.get("cidr_block") + ipv6_cidr = module.params.get("ipv6_cidr") + purge_cidrs = module.params.get("purge_cidrs") + tenancy = module.params.get("tenancy") + dns_support = module.params.get("dns_support") + dns_hostnames = module.params.get("dns_hostnames") + dhcp_id = module.params.get("dhcp_opts_id") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + state = module.params.get("state") + multi = module.params.get("multi_ok") changed = False connection = module.client( - 'ec2', + "ec2", retry_decorator=AWSRetry.jittered_backoff( - retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound'] + retries=8, delay=3, catch_extra_error_codes=["InvalidVpcID.NotFound"] ), ) if dns_hostnames and not dns_support: - module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support') + module.fail_json(msg="In order to enable DNS Hostnames you must also enable DNS support") - cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block')) + cidr_block = get_cidr_network_bits(module, module.params.get("cidr_block")) if vpc_id is None: vpc_id = vpc_exists(module, connection, name, cidr_block, multi) - if state == 'present': - + if state == "present": # Check if VPC exists if vpc_id is None: - if module.params.get('name') is None: - module.fail_json('The name parameter must be specified when creating a new VPC.') + if module.params.get("name") is None: + module.fail_json("The name parameter must be specified when creating a new VPC.") vpc_id = create_vpc(connection, module, cidr_block[0], tenancy, tags, ipv6_cidr, name) changed = True vpc_obj = get_vpc(module, connection, vpc_id) if len(cidr_block) > 1: - cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs) + cidrs_changed, desired_cidrs = update_cidrs( + connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs + ) changed |= cidrs_changed else: desired_cidrs = None @@ -701,20 +719,22 @@ def main(): hostnames_changed = update_dns_hostnames(connection, module, vpc_id, dns_hostnames) changed |= hostnames_changed - wait_for_updates(connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id) + wait_for_updates( + connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id + ) updated_obj = get_vpc(module, connection, vpc_id) final_state = camel_dict_to_snake_dict(updated_obj) - final_state['tags'] = boto3_tag_list_to_ansible_dict(updated_obj.get('Tags', [])) - final_state['name'] = final_state['tags'].get('Name', None) - final_state['id'] = final_state.pop('vpc_id') + final_state["tags"] = boto3_tag_list_to_ansible_dict(updated_obj.get("Tags", [])) + final_state["name"] = final_state["tags"].get("Name", None) + final_state["id"] = final_state.pop("vpc_id") module.exit_json(changed=changed, vpc=final_state) - elif state == 'absent': + elif state == "absent": changed = delete_vpc(connection, module, vpc_id) module.exit_json(changed=changed, vpc={}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py index e32b42d83..93b44fa79 100644 --- 
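One detail from wait_for_updates() above worth isolating before the next file: each Ansible tag becomes its own tag:<Key> filter, so the waiter only passes once every tag is visible on the VPC. A hedged illustration with hypothetical tags (the tag_list shape assumed here matches what ansible_dict_to_boto3_tag_list() produces):

tags = {"Name": "example-vpc", "env": "dev"}  # hypothetical module tags
tag_list = [{"Key": k, "Value": v} for k, v in tags.items()]
filters = [{"Name": f"tag:{t['Key']}", "Values": [t["Value"]]} for t in tag_list]
# [{'Name': 'tag:Name', 'Values': ['example-vpc']},
#  {'Name': 'tag:env', 'Values': ['dev']}]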
a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_net_info version_added: 1.0.0 @@ -28,12 +26,12 @@ options: type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all VPCs @@ -47,10 +45,9 @@ EXAMPLES = ''' - amazon.aws.ec2_vpc_net_info: filters: "tag:Name": Example +""" -''' - -RETURN = ''' +RETURN = r""" vpcs: description: Returns an array of complex objects as described below. returned: success @@ -84,14 +81,6 @@ vpcs: description: The IPv4 CIDR block assigned to the VPC. returned: always type: str - classic_link_dns_supported: - description: True/False depending on attribute setting for classic link DNS support. - returned: always - type: bool - classic_link_enabled: - description: True/False depending on if classic link support is enabled. - returned: always - type: bool enable_dns_hostnames: description: True/False depending on attribute setting for DNS hostnames support. returned: always @@ -154,7 +143,7 @@ vpcs: returned: always type: str sample: dopt-12345678 -''' +""" try: import botocore @@ -163,11 +152,11 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def describe_vpcs(connection, module): @@ -178,8 +167,8 @@ def describe_vpcs(connection, module): module : AnsibleAWSModule object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - vpc_ids = module.params.get('vpc_ids') + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + vpc_ids = module.params.get("vpc_ids") # init empty list for return vars vpc_info = list() @@ -188,66 +177,36 @@ def describe_vpcs(connection, module): try: response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + module.fail_json_aws(e, msg=f"Unable to describe VPCs {vpc_ids}") # We can get these results in bulk but still needs two separate calls to the API - cl_enabled = {} - cl_dns_support = {} dns_support = {} dns_hostnames = {} # Loop through the results and add the other VPC attributes we gathered - for vpc in response['Vpcs']: + for vpc in response["Vpcs"]: error_message = "Unable to describe VPC attribute {0} on VPC {1}" - cl_enabled = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkEnabled', error_message) - cl_dns_support = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkDnsSupported', error_message) - dns_support = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsSupport', error_message) - dns_hostnames = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsHostnames', error_message) - if cl_enabled: - # loop through the ClassicLink Enabled results and add the value for the correct VPC - for item in cl_enabled['Vpcs']: - if vpc['VpcId'] == item['VpcId']: - vpc['ClassicLinkEnabled'] = item.get('ClassicLinkEnabled', False) - if cl_dns_support: - # loop through the ClassicLink DNS support results and add the value for the correct VPC - for item in cl_dns_support['Vpcs']: - if vpc['VpcId'] == item['VpcId']: - vpc['ClassicLinkDnsSupported'] = item.get('ClassicLinkDnsSupported', False) + dns_support = describe_vpc_attribute(module, connection, vpc["VpcId"], "enableDnsSupport", error_message) + dns_hostnames = describe_vpc_attribute(module, connection, vpc["VpcId"], "enableDnsHostnames", error_message) # add the two DNS attributes if dns_support: - vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') + vpc["EnableDnsSupport"] = dns_support["EnableDnsSupport"].get("Value") if dns_hostnames: - vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') + vpc["EnableDnsHostnames"] = dns_hostnames["EnableDnsHostnames"].get("Value") # for backwards compatibility - vpc['id'] = vpc['VpcId'] + vpc["id"] = vpc["VpcId"] vpc_info.append(camel_dict_to_snake_dict(vpc)) # convert tag list to ansible dict - vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) + vpc_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(vpc.get("Tags", [])) module.exit_json(vpcs=vpc_info) -def describe_classic_links(module, connection, vpc, attribute, error_message): - result = None - try: - if attribute == "ClassicLinkEnabled": - result = connection.describe_vpc_classic_link(VpcIds=[vpc], aws_retry=True) - else: - result = connection.describe_vpc_classic_link_dns_support(VpcIds=[vpc], aws_retry=True) - except is_boto3_error_code('UnsupportedOperation'): - result = {'Vpcs': [{'VpcId': vpc}]} - except is_boto3_error_code('InvalidVpcID.NotFound'): - module.warn(error_message.format(attribute, vpc)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Unable to describe if {0} is enabled'.format(attribute)) - return result - - def describe_vpc_attribute(module, connection, vpc, attribute, error_message): result = None try: return connection.describe_vpc_attribute(VpcId=vpc, Attribute=attribute, aws_retry=True) - except is_boto3_error_code('InvalidVpcID.NotFound'): + except is_boto3_error_code("InvalidVpcID.NotFound"): module.warn(error_message.format(attribute, 
vpc)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format(attribute, vpc)) @@ -256,16 +215,16 @@ def describe_vpc_attribute(module, connection, vpc, attribute, error_message): def main(): argument_spec = dict( - vpc_ids=dict(type='list', elements='str', default=[]), - filters=dict(type='dict', default={}) + vpc_ids=dict(type="list", elements="str", default=[]), + filters=dict(type="dict", default={}), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) describe_vpcs(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py index 583a0a076..34f12e789 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_route_table version_added: 1.0.0 @@ -85,13 +83,13 @@ options: notes: - Tags are used to uniquely identify route tables within a VPC when the I(route_table_id) is not supplied. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic creation example: @@ -155,9 +153,9 @@ EXAMPLES = r''' route_table_id: "{{ route_table.id }}" lookup: id state: absent -''' +""" -RETURN = r''' +RETURN = r""" route_table: description: Route Table result. returned: always @@ -258,6 +256,12 @@ route_table: returned: when the route is via a NAT gateway type: str sample: local + carrier_gateway_id: + description: ID of the Carrier gateway. + returned: when the route is via a Carrier gateway + type: str + sample: local + version_added: 6.0.0 origin: description: mechanism through which the route is in the table. 
returned: always @@ -280,11 +284,11 @@ route_table: returned: always type: str sample: vpc-6e2d2407 -''' +""" import re -from time import sleep from ipaddress import ip_network +from time import sleep try: import botocore @@ -294,33 +298,34 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff() def describe_subnets_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_subnets') - return paginator.paginate(**params).build_full_result()['Subnets'] + paginator = connection.get_paginator("describe_subnets") + return paginator.paginate(**params).build_full_result()["Subnets"] @AWSRetry.jittered_backoff() def describe_igws_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_internet_gateways') - return paginator.paginate(**params).build_full_result()['InternetGateways'] + paginator = connection.get_paginator("describe_internet_gateways") + return paginator.paginate(**params).build_full_result()["InternetGateways"] @AWSRetry.jittered_backoff() def describe_route_tables_with_backoff(connection, **params): try: - paginator = connection.get_paginator('describe_route_tables') - return paginator.paginate(**params).build_full_result()['RouteTables'] - except is_boto3_error_code('InvalidRouteTableID.NotFound'): + paginator = connection.get_paginator("describe_route_tables") + return paginator.paginate(**params).build_full_result()["RouteTables"] + except is_boto3_error_code("InvalidRouteTableID.NotFound"): return None @@ -329,13 +334,13 @@ def find_subnets(connection, module, vpc_id, identified_subnets): Finds a list of subnets, each identified either by a raw ID, a unique 'Name' tag, or a CIDR such as 10.0.0.0/8. 
""" - CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') - SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') + CIDR_RE = re.compile(r"^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$") + SUBNET_RE = re.compile(r"^subnet-[A-z0-9]+$") subnet_ids = [] subnet_names = [] subnet_cidrs = [] - for subnet in (identified_subnets or []): + for subnet in identified_subnets or []: if re.match(SUBNET_RE, subnet): subnet_ids.append(subnet) elif re.match(CIDR_RE, subnet): @@ -345,34 +350,36 @@ def find_subnets(connection, module, vpc_id, identified_subnets): subnets_by_id = [] if subnet_ids: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) try: subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) + module.fail_json_aws(e, msg=f"Couldn't find subnet with id {subnet_ids}") subnets_by_cidr = [] if subnet_cidrs: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "cidr": subnet_cidrs}) try: subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) + module.fail_json_aws(e, msg=f"Couldn't find subnet with cidr {subnet_cidrs}") subnets_by_name = [] if subnet_names: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "tag:Name": subnet_names}) try: subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) + module.fail_json_aws(e, msg=f"Couldn't find subnet with names {subnet_names}") for name in subnet_names: - matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name]) + matching_count = len( + [1 for s in subnets_by_name for t in s.get("Tags", []) if t["Key"] == "Name" and t["Value"] == name] + ) if matching_count == 0: - module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) + module.fail_json(msg=f'Subnet named "{name}" does not exist') elif matching_count > 1: - module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) + module.fail_json(msg=f'Multiple subnets named "{name}"') return subnets_by_id + subnets_by_cidr + subnets_by_name @@ -381,26 +388,24 @@ def find_igw(connection, module, vpc_id): """ Finds the Internet gateway for the given VPC ID. 
""" - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"attachment.vpc-id": vpc_id}) try: igw = describe_igws_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) + module.fail_json_aws(e, msg=f"No IGW found for VPC {vpc_id}") if len(igw) == 1: - return igw[0]['InternetGatewayId'] + return igw[0]["InternetGatewayId"] elif len(igw) == 0: - module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"No IGWs found for VPC {vpc_id}") else: - module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"Multiple IGWs found for VPC {vpc_id}") def tags_match(match_tags, candidate_tags): - return all((k in candidate_tags and candidate_tags[k] == v - for k, v in match_tags.items())) + return all((k in candidate_tags and candidate_tags[k] == v for k, v in match_tags.items())) def get_route_table_by_id(connection, module, route_table_id): - route_table = None try: route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id]) @@ -415,13 +420,13 @@ def get_route_table_by_id(connection, module, route_table_id): def get_route_table_by_tags(connection, module, vpc_id, tags): count = 0 route_table = None - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route table") for table in route_tables: - this_tags = describe_ec2_tags(connection, module, table['RouteTableId']) + this_tags = describe_ec2_tags(connection, module, table["RouteTableId"]) if tags_match(tags, this_tags): route_table = table count += 1 @@ -433,20 +438,20 @@ def get_route_table_by_tags(connection, module, vpc_id, tags): def route_spec_matches_route(route_spec, route): - if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']: - route_spec['NatGatewayId'] = route_spec.pop('GatewayId') - if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']: - if route_spec.get('DestinationCidrBlock', '').startswith('pl-'): - route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock') + if route_spec.get("GatewayId") and "nat-" in route_spec["GatewayId"]: + route_spec["NatGatewayId"] = route_spec.pop("GatewayId") + if route_spec.get("GatewayId") and "vpce-" in route_spec["GatewayId"]: + if route_spec.get("DestinationCidrBlock", "").startswith("pl-"): + route_spec["DestinationPrefixListId"] = route_spec.pop("DestinationCidrBlock") return set(route_spec.items()).issubset(route.items()) def route_spec_matches_route_cidr(route_spec, route): - if route_spec.get('DestinationCidrBlock') and route.get('DestinationCidrBlock'): - return route_spec.get('DestinationCidrBlock') == route.get('DestinationCidrBlock') - if route_spec.get('DestinationIpv6CidrBlock') and route.get('DestinationIpv6CidrBlock'): - return route_spec.get('DestinationIpv6CidrBlock') == route.get('DestinationIpv6CidrBlock') + if route_spec.get("DestinationCidrBlock") and route.get("DestinationCidrBlock"): + return route_spec.get("DestinationCidrBlock") == route.get("DestinationCidrBlock") + if route_spec.get("DestinationIpv6CidrBlock") and 
route.get("DestinationIpv6CidrBlock"): + return route_spec.get("DestinationIpv6CidrBlock") == route.get("DestinationIpv6CidrBlock") return False @@ -458,39 +463,43 @@ def index_of_matching_route(route_spec, routes_to_match): for i, route in enumerate(routes_to_match): if route_spec_matches_route(route_spec, route): return "exact", i - elif 'Origin' in route and route['Origin'] != 'EnableVgwRoutePropagation': # only replace created routes + elif "Origin" in route and route["Origin"] != "EnableVgwRoutePropagation": # only replace created routes if route_spec_matches_route_cidr(route_spec, route): return "replace", i def ensure_routes(connection, module, route_table, route_specs, purge_routes): - routes_to_match = list(route_table['Routes']) + routes_to_match = list(route_table["Routes"]) route_specs_to_create = [] route_specs_to_recreate = [] for route_spec in route_specs: match = index_of_matching_route(route_spec, routes_to_match) if match is None: - if route_spec.get('DestinationCidrBlock') or route_spec.get('DestinationIpv6CidrBlock'): + if route_spec.get("DestinationCidrBlock") or route_spec.get("DestinationIpv6CidrBlock"): route_specs_to_create.append(route_spec) else: - module.warn("Skipping creating {0} because it has no destination cidr block. " - "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec)) + module.warn( + f"Skipping creating {route_spec} because it has no destination cidr block. To add VPC endpoints to" + " route tables use the ec2_vpc_endpoint module." + ) else: if match[0] == "replace": - if route_spec.get('DestinationCidrBlock'): + if route_spec.get("DestinationCidrBlock"): route_specs_to_recreate.append(route_spec) else: - module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec)) + module.warn(f"Skipping recreating route {route_spec} because it has no destination cidr block.") del routes_to_match[match[1]] routes_to_delete = [] if purge_routes: for route in routes_to_match: - if not route.get('DestinationCidrBlock'): - module.warn("Skipping purging route {0} because it has no destination cidr block. " - "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(route)) + if not route.get("DestinationCidrBlock"): + module.warn( + f"Skipping purging route {route} because it has no destination cidr block. To remove VPC endpoints" + " from route tables use the ec2_vpc_endpoint module." 
+ ) continue - if route['Origin'] == 'CreateRoute': + if route["Origin"] == "CreateRoute": routes_to_delete.append(route) changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate) @@ -499,78 +508,91 @@ def ensure_routes(connection, module, route_table, route_specs, purge_routes): try: connection.delete_route( aws_retry=True, - RouteTableId=route_table['RouteTableId'], - DestinationCidrBlock=route['DestinationCidrBlock']) + RouteTableId=route_table["RouteTableId"], + DestinationCidrBlock=route["DestinationCidrBlock"], + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete route") for route_spec in route_specs_to_recreate: try: - connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) + connection.replace_route(aws_retry=True, RouteTableId=route_table["RouteTableId"], **route_spec) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't recreate route") for route_spec in route_specs_to_create: try: - connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) - except is_boto3_error_code('RouteAlreadyExists'): + connection.create_route(aws_retry=True, RouteTableId=route_table["RouteTableId"], **route_spec) + except is_boto3_error_code("RouteAlreadyExists"): changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create route") return changed def ensure_subnet_association(connection, module, vpc_id, route_table_id, subnet_id): - filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"association.subnet-id": subnet_id, "vpc-id": vpc_id}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route tables") for route_table in route_tables: - if route_table.get('RouteTableId'): - for association in route_table['Associations']: - if association['Main']: + if route_table.get("RouteTableId"): + for association in route_table["Associations"]: + if association["Main"]: continue - if association['SubnetId'] == subnet_id: - if route_table['RouteTableId'] == route_table_id: - return {'changed': False, 'association_id': association['RouteTableAssociationId']} + if association["SubnetId"] == subnet_id: + if route_table["RouteTableId"] == route_table_id: + return {"changed": False, "association_id": association["RouteTableAssociationId"]} if module.check_mode: - return {'changed': True} + return {"changed": True} try: connection.disassociate_route_table( - aws_retry=True, AssociationId=association['RouteTableAssociationId']) + aws_retry=True, AssociationId=association["RouteTableAssociationId"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") if module.check_mode: - return {'changed': True} + return {"changed": True} try: - association_id = connection.associate_route_table(aws_retry=True, - RouteTableId=route_table_id, - SubnetId=subnet_id) + association_id = 
connection.associate_route_table( + aws_retry=True, RouteTableId=route_table_id, SubnetId=subnet_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't associate subnet with route table") - return {'changed': True, 'association_id': association_id} + return {"changed": True, "association_id": association_id} def ensure_subnet_associations(connection, module, route_table, subnets, purge_subnets): - current_association_ids = [association['RouteTableAssociationId'] for association in route_table['Associations'] - if not association['Main'] and association.get('SubnetId')] + current_association_ids = [ + association["RouteTableAssociationId"] + for association in route_table["Associations"] + if not association["Main"] and association.get("SubnetId") + ] new_association_ids = [] changed = False for subnet in subnets: result = ensure_subnet_association( - connection=connection, module=module, vpc_id=route_table['VpcId'], - route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId']) - changed = changed or result['changed'] + connection=connection, + module=module, + vpc_id=route_table["VpcId"], + route_table_id=route_table["RouteTableId"], + subnet_id=subnet["SubnetId"], + ) + changed = changed or result["changed"] if changed and module.check_mode: return True - new_association_ids.append(result['association_id']) + new_association_ids.append(result["association_id"]) if purge_subnets: - to_delete = [association_id for association_id in current_association_ids - if association_id not in new_association_ids] + to_delete = [ + association_id for association_id in current_association_ids if association_id not in new_association_ids + ] for association_id in to_delete: changed = True if not module.check_mode: @@ -586,8 +608,13 @@ def disassociate_gateway(connection, module, route_table): # Delete all gateway associations that have state = associated # Subnet associations are handled in its method changed = False - associations_to_delete = [association['RouteTableAssociationId'] for association in route_table['Associations'] if not association['Main'] - and association.get('GatewayId') and association['AssociationState']['State'] in ['associated', 'associating']] + associations_to_delete = [ + association["RouteTableAssociationId"] + for association in route_table["Associations"] + if not association["Main"] + and association.get("GatewayId") + and association["AssociationState"]["State"] in ["associated", "associating"] + ] for association_id in associations_to_delete: changed = True if not module.check_mode: @@ -600,33 +627,36 @@ def disassociate_gateway(connection, module, route_table): def associate_gateway(connection, module, route_table, gateway_id): - filters = ansible_dict_to_boto3_filter_list({'association.gateway-id': gateway_id, 'vpc-id': route_table['VpcId']}) + filters = ansible_dict_to_boto3_filter_list({"association.gateway-id": gateway_id, "vpc-id": route_table["VpcId"]}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route tables") for table in route_tables: - if table.get('RouteTableId'): - for association in table.get('Associations'): - if association['Main']: + if table.get("RouteTableId"): + for association in table.get("Associations"): + if association["Main"]: continue - if association.get('GatewayId', '') == gateway_id and 
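The purge step of ensure_subnet_associations() above is again a difference of ID lists, sketched here with hypothetical association IDs:

current_association_ids = ["rtbassoc-0aaa", "rtbassoc-0bbb"]  # non-Main, with SubnetId
new_association_ids = ["rtbassoc-0bbb"]  # kept or newly created above
to_delete = [
    association_id
    for association_id in current_association_ids
    if association_id not in new_association_ids
]
print(to_delete)  # ['rtbassoc-0aaa'] -> one disassociate_route_table() call each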
(association['AssociationState']['State'] in ['associated', 'associating']): - if table['RouteTableId'] == route_table['RouteTableId']: + if association.get("GatewayId", "") == gateway_id and ( + association["AssociationState"]["State"] in ["associated", "associating"] + ): + if table["RouteTableId"] == route_table["RouteTableId"]: return False elif module.check_mode: return True else: try: connection.disassociate_route_table( - aws_retry=True, AssociationId=association['RouteTableAssociationId']) + aws_retry=True, AssociationId=association["RouteTableAssociationId"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table") if not module.check_mode: try: - connection.associate_route_table(aws_retry=True, - RouteTableId=route_table['RouteTableId'], - GatewayId=gateway_id) + connection.associate_route_table( + aws_retry=True, RouteTableId=route_table["RouteTableId"], GatewayId=gateway_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't associate gateway with route table") return True @@ -634,7 +664,7 @@ def associate_gateway(connection, module, route_table, gateway_id): def ensure_propagation(connection, module, route_table, propagating_vgw_ids): changed = False - gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']] + gateways = [gateway["GatewayId"] for gateway in route_table["PropagatingVgws"]] vgws_to_add = set(propagating_vgw_ids) - set(gateways) if vgws_to_add: changed = True @@ -642,9 +672,8 @@ def ensure_propagation(connection, module, route_table, propagating_vgw_ids): for vgw_id in vgws_to_add: try: connection.enable_vgw_route_propagation( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - GatewayId=vgw_id) + aws_retry=True, RouteTableId=route_table["RouteTableId"], GatewayId=vgw_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't enable route propagation") @@ -652,86 +681,86 @@ def ensure_propagation(connection, module, route_table, propagating_vgw_ids): def ensure_route_table_absent(connection, module): + lookup = module.params.get("lookup") + route_table_id = module.params.get("route_table_id") + tags = module.params.get("tags") + vpc_id = module.params.get("vpc_id") + purge_subnets = module.params.get("purge_subnets") - lookup = module.params.get('lookup') - route_table_id = module.params.get('route_table_id') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') - purge_subnets = module.params.get('purge_subnets') - - if lookup == 'tag': + if lookup == "tag": if tags is not None: route_table = get_route_table_by_tags(connection, module, vpc_id, tags) else: route_table = None - elif lookup == 'id': + elif lookup == "id": route_table = get_route_table_by_id(connection, module, route_table_id) if route_table is None: - return {'changed': False} + return {"changed": False} # disassociate subnets and gateway before deleting route table if not module.check_mode: - ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=[], purge_subnets=purge_subnets) + ensure_subnet_associations( + connection=connection, module=module, route_table=route_table, subnets=[], purge_subnets=purge_subnets + ) disassociate_gateway(connection=connection, module=module, route_table=route_table) try: - connection.delete_route_table(aws_retry=True, 
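ensure_propagation() above only ever adds propagation; a quick sketch of the diff it computes, with hypothetical gateway IDs:

propagating_vgw_ids = ["vgw-0aaa", "vgw-0bbb"]  # desired, from module params
gateways = ["vgw-0aaa"]  # current, from route_table["PropagatingVgws"]
vgws_to_add = set(propagating_vgw_ids) - set(gateways)  # {'vgw-0bbb'}
# Each member gets an enable_vgw_route_propagation() call; nothing is removed.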
RouteTableId=route_table['RouteTableId']) + connection.delete_route_table(aws_retry=True, RouteTableId=route_table["RouteTableId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error deleting route table") - return {'changed': True} + return {"changed": True} def get_route_table_info(connection, module, route_table): - result = get_route_table_by_id(connection, module, route_table['RouteTableId']) + result = get_route_table_by_id(connection, module, route_table["RouteTableId"]) try: - result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + result["Tags"] = describe_ec2_tags(connection, module, route_table["RouteTableId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for route table") - result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) + result = camel_dict_to_snake_dict(result, ignore_list=["Tags"]) # backwards compatibility - result['id'] = result['route_table_id'] + result["id"] = result["route_table_id"] return result def create_route_spec(connection, module, vpc_id): - routes = module.params.get('routes') + routes = module.params.get("routes") for route_spec in routes: - - cidr_block_type = str(type(ip_network(route_spec['dest']))) + cidr_block_type = str(type(ip_network(route_spec["dest"]))) if "IPv4" in cidr_block_type: - rename_key(route_spec, 'dest', 'destination_cidr_block') + rename_key(route_spec, "dest", "destination_cidr_block") if "IPv6" in cidr_block_type: - rename_key(route_spec, 'dest', 'destination_ipv6_cidr_block') + rename_key(route_spec, "dest", "destination_ipv6_cidr_block") - if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': + if route_spec.get("gateway_id") and route_spec["gateway_id"].lower() == "igw": igw = find_igw(connection, module, vpc_id) - route_spec['gateway_id'] = igw - if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'): - rename_key(route_spec, 'gateway_id', 'nat_gateway_id') + route_spec["gateway_id"] = igw + if route_spec.get("gateway_id") and route_spec["gateway_id"].startswith("nat-"): + rename_key(route_spec, "gateway_id", "nat_gateway_id") + if route_spec.get("gateway_id") and route_spec["gateway_id"].startswith("cagw-"): + rename_key(route_spec, "gateway_id", "carrier_gateway_id") return snake_dict_to_camel_dict(routes, capitalize_first=True) def ensure_route_table_present(connection, module): - - gateway_id = module.params.get('gateway_id') - lookup = module.params.get('lookup') - propagating_vgw_ids = module.params.get('propagating_vgw_ids') - purge_routes = module.params.get('purge_routes') - purge_subnets = module.params.get('purge_subnets') - purge_tags = module.params.get('purge_tags') - route_table_id = module.params.get('route_table_id') - subnets = module.params.get('subnets') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') + gateway_id = module.params.get("gateway_id") + lookup = module.params.get("lookup") + propagating_vgw_ids = module.params.get("propagating_vgw_ids") + purge_routes = module.params.get("purge_routes") + purge_subnets = module.params.get("purge_subnets") + purge_tags = module.params.get("purge_tags") + route_table_id = module.params.get("route_table_id") + subnets = module.params.get("subnets") + tags = module.params.get("tags") + vpc_id = module.params.get("vpc_id") routes = create_route_spec(connection, module, vpc_id) changed = False tags_valid 
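create_route_spec() above dispatches on the ipaddress network type to pick the destination key. A standalone sketch; rename_key() is re-implemented here with assumed semantics, since the helper itself is imported elsewhere:

from ipaddress import ip_network

def rename_key(d, old_key, new_key):  # assumed to match the helper used above
    d[new_key] = d.pop(old_key)

routes = [{"dest": "10.0.0.0/16"}, {"dest": "2001:db8::/56"}]
for route_spec in routes:
    cidr_block_type = str(type(ip_network(route_spec["dest"])))
    if "IPv4" in cidr_block_type:
        rename_key(route_spec, "dest", "destination_cidr_block")
    if "IPv6" in cidr_block_type:
        rename_key(route_spec, "dest", "destination_ipv6_cidr_block")
print(routes)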
= False - if lookup == 'tag': + if lookup == "tag": if tags is not None: try: route_table = get_route_table_by_tags(connection, module, vpc_id, tags) @@ -739,7 +768,7 @@ def ensure_route_table_present(connection, module): module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'") else: route_table = None - elif lookup == 'id': + elif lookup == "id": try: route_table = get_route_table_by_id(connection, module, route_table_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -750,15 +779,16 @@ def ensure_route_table_present(connection, module): changed = True if not module.check_mode: try: - route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable'] + create_params = {"VpcId": vpc_id} + if tags: + create_params["TagSpecifications"] = boto3_tag_specifications(tags, types="route-table") + route_table = connection.create_route_table(aws_retry=True, **create_params)["RouteTable"] # try to wait for route table to be present before moving on - get_waiter( - connection, 'route_table_exists' - ).wait( - RouteTableIds=[route_table['RouteTableId']], + get_waiter(connection, "route_table_exists").wait( + RouteTableIds=[route_table["RouteTableId"]], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout waiting for route table creation') + module.fail_json_aws(e, msg="Timeout waiting for route table creation") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating route table") else: @@ -766,31 +796,45 @@ def ensure_route_table_present(connection, module): module.exit_json(changed=changed, route_table=route_table) if routes is not None: - result = ensure_routes(connection=connection, module=module, route_table=route_table, - route_specs=routes, purge_routes=purge_routes) + result = ensure_routes( + connection=connection, module=module, route_table=route_table, route_specs=routes, purge_routes=purge_routes + ) changed = changed or result if propagating_vgw_ids is not None: - result = ensure_propagation(connection=connection, module=module, route_table=route_table, - propagating_vgw_ids=propagating_vgw_ids) + result = ensure_propagation( + connection=connection, module=module, route_table=route_table, propagating_vgw_ids=propagating_vgw_ids + ) changed = changed or result if not tags_valid and tags is not None: - changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'], - tags=tags, purge_tags=purge_tags, - retry_codes=['InvalidRouteTableID.NotFound']) - route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + changed |= ensure_ec2_tags( + connection, + module, + route_table["RouteTableId"], + tags=tags, + purge_tags=purge_tags, + retry_codes=["InvalidRouteTableID.NotFound"], + ) + route_table["Tags"] = describe_ec2_tags(connection, module, route_table["RouteTableId"]) if subnets is not None: associated_subnets = find_subnets(connection, module, vpc_id, subnets) - result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=associated_subnets, purge_subnets=purge_subnets) + result = ensure_subnet_associations( + connection=connection, + module=module, + route_table=route_table, + subnets=associated_subnets, + purge_subnets=purge_subnets, + ) changed = changed or result - if gateway_id == 'None' or gateway_id == '': + if gateway_id == "None" or gateway_id == "": gateway_changed = disassociate_gateway(connection=connection, 
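Tagging at creation time, as the new create_route_table() call above does, avoids a separate tagging round-trip. A sketch of the TagSpecifications shape the EC2 API expects, with boto3_tag_specifications() approximated as a hypothetical re-implementation:

def tag_specifications(tags, types):  # assumed behavior of boto3_tag_specifications()
    tag_list = [{"Key": k, "Value": v} for k, v in tags.items()]
    return [{"ResourceType": types, "Tags": tag_list}]

create_params = {"VpcId": "vpc-6e2d2407"}  # hypothetical VPC
tags = {"Name": "Public"}
if tags:
    create_params["TagSpecifications"] = tag_specifications(tags, types="route-table")
# -> [{'ResourceType': 'route-table', 'Tags': [{'Key': 'Name', 'Value': 'Public'}]}]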
module=module, route_table=route_table) elif gateway_id is not None: - gateway_changed = associate_gateway(connection=connection, module=module, route_table=route_table, gateway_id=gateway_id) + gateway_changed = associate_gateway( + connection=connection, module=module, route_table=route_table, gateway_id=gateway_id + ) else: gateway_changed = False @@ -804,40 +848,44 @@ def ensure_route_table_present(connection, module): def main(): argument_spec = dict( - gateway_id=dict(type='str'), - lookup=dict(default='tag', choices=['tag', 'id']), - propagating_vgw_ids=dict(type='list', elements='str'), - purge_routes=dict(default=True, type='bool'), - purge_subnets=dict(default=True, type='bool'), - purge_tags=dict(type='bool', default=True), + gateway_id=dict(type="str"), + lookup=dict(default="tag", choices=["tag", "id"]), + propagating_vgw_ids=dict(type="list", elements="str"), + purge_routes=dict(default=True, type="bool"), + purge_subnets=dict(default=True, type="bool"), + purge_tags=dict(type="bool", default=True), route_table_id=dict(), - routes=dict(default=[], type='list', elements='dict'), - state=dict(default='present', choices=['present', 'absent']), - subnets=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), - vpc_id=dict() + routes=dict(default=[], type="list", elements="dict"), + state=dict(default="present", choices=["present", "absent"]), + subnets=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + vpc_id=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['lookup', 'id', ['route_table_id']], - ['lookup', 'tag', ['vpc_id']], - ['state', 'present', ['vpc_id']]], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ["lookup", "id", ["route_table_id"]], + ["lookup", "tag", ["vpc_id"]], + ["state", "present", ["vpc_id"]], + ], + supports_check_mode=True, + ) # The tests for RouteTable existing uses its own decorator, we can safely # retry on InvalidRouteTableID.NotFound - retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound']) - connection = module.client('ec2', retry_decorator=retry_decorator) + retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InvalidRouteTableID.NotFound"]) + connection = module.client("ec2", retry_decorator=retry_decorator) - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": result = ensure_route_table_present(connection, module) - elif state == 'absent': + elif state == "absent": result = ensure_route_table_absent(connection, module) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py index b7b3c69d4..d330299af 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_route_table_info version_added: 
1.0.0 @@ -22,13 +20,14 @@ options: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. type: dict + default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all VPC route tables @@ -48,9 +47,9 @@ EXAMPLES = r''' amazon.aws.ec2_vpc_route_table_info: filters: vpc-id: vpc-abcdef00 -''' +""" -RETURN = r''' +RETURN = r""" route_tables: description: - A list of dictionarys describing route tables. @@ -186,7 +185,7 @@ route_tables: returned: always type: str sample: vpc-6e2d2407 -''' +""" try: import botocore @@ -195,45 +194,44 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def describe_route_tables_with_backoff(connection, **params): try: - paginator = connection.get_paginator('describe_route_tables') + paginator = connection.get_paginator("describe_route_tables") return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('InvalidRouteTableID.NotFound'): + except is_boto3_error_code("InvalidRouteTableID.NotFound"): return None def normalize_route(route): # Historically these were all there, but set to null when empty' - for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId', - 'Origin', 'State', 'NetworkInterfaceId']: + for legacy_key in ["DestinationCidrBlock", "GatewayId", "InstanceId", "Origin", "State", "NetworkInterfaceId"]: if legacy_key not in route: route[legacy_key] = None - route['InterfaceId'] = route['NetworkInterfaceId'] + route["InterfaceId"] = route["NetworkInterfaceId"] return route def normalize_association(assoc): # Name change between boto v2 and boto v3, return both - assoc['Id'] = assoc['RouteTableAssociationId'] + assoc["Id"] = assoc["RouteTableAssociationId"] return assoc def normalize_route_table(table): - table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags']) - table['Associations'] = [normalize_association(assoc) for assoc in table['Associations']] - table['Routes'] = [normalize_route(route) for route in table['Routes']] - table['Id'] = table['RouteTableId'] - del table['Tags'] - return camel_dict_to_snake_dict(table, ignore_list=['tags']) + table["tags"] = 
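normalize_route() above back-fills keys that the boto2-era module always returned; it runs standalone (setdefault is used here in place of the explicit membership test, same effect):

def normalize_route(route):
    # Historically these keys were always present, null when empty.
    for legacy_key in ["DestinationCidrBlock", "GatewayId", "InstanceId",
                       "Origin", "State", "NetworkInterfaceId"]:
        route.setdefault(legacy_key, None)
    route["InterfaceId"] = route["NetworkInterfaceId"]  # boto2/boto3 name change
    return route

print(normalize_route({"DestinationCidrBlock": "10.0.0.0/16", "GatewayId": "local"}))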
boto3_tag_list_to_ansible_dict(table["Tags"]) + table["Associations"] = [normalize_association(assoc) for assoc in table["Associations"]] + table["Routes"] = [normalize_route(route) for route in table["Routes"]] + table["Id"] = table["RouteTableId"] + del table["Tags"] + return camel_dict_to_snake_dict(table, ignore_list=["tags"]) def normalize_results(results): @@ -242,15 +240,14 @@ def normalize_results(results): maintained and the shape of the return values are what people expect """ - routes = [normalize_route_table(route) for route in results['RouteTables']] - del results['RouteTables'] + routes = [normalize_route_table(route) for route in results["RouteTables"]] + del results["RouteTables"] results = camel_dict_to_snake_dict(results) - results['route_tables'] = routes + results["route_tables"] = routes return results def list_ec2_vpc_route_tables(connection, module): - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: @@ -264,16 +261,15 @@ def list_ec2_vpc_route_tables(connection, module): def main(): argument_spec = dict( - filters=dict(default=None, type='dict'), + filters=dict(default={}, type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) list_ec2_vpc_route_tables(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py index ae806ae14..29c7c75f2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_subnet version_added: 1.0.0 @@ -74,16 +72,14 @@ options: - Ignored unless I(wait=True). default: 300 type: int - tags: - default: {} extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create subnet for database servers @@ -114,9 +110,9 @@ EXAMPLES = ''' vpc_id: vpc-123456 cidr: 10.1.100.0/24 ipv6_cidr: '' -''' +""" -RETURN = ''' +RETURN = r""" subnet: description: Dictionary of subnet values returned: I(state=present) @@ -204,7 +200,7 @@ subnet: description: The CIDR block association state. 
returned: always type: str -''' +""" import time @@ -214,81 +210,82 @@ try: except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_tag_filter_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_subnet_info(subnet): - if 'Subnets' in subnet: - return [get_subnet_info(s) for s in subnet['Subnets']] - elif 'Subnet' in subnet: - subnet = camel_dict_to_snake_dict(subnet['Subnet']) + if "Subnets" in subnet: + return [get_subnet_info(s) for s in subnet["Subnets"]] + elif "Subnet" in subnet: + subnet = camel_dict_to_snake_dict(subnet["Subnet"]) else: subnet = camel_dict_to_snake_dict(subnet) - if 'tags' in subnet: - subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags']) + if "tags" in subnet: + subnet["tags"] = boto3_tag_list_to_ansible_dict(subnet["tags"]) else: - subnet['tags'] = dict() + subnet["tags"] = dict() - if 'subnet_id' in subnet: - subnet['id'] = subnet['subnet_id'] - del subnet['subnet_id'] + if "subnet_id" in subnet: + subnet["id"] = subnet["subnet_id"] + del subnet["subnet_id"] - subnet['ipv6_cidr_block'] = '' - subnet['ipv6_association_id'] = '' - ipv6set = subnet.get('ipv6_cidr_block_association_set') + subnet["ipv6_cidr_block"] = "" + subnet["ipv6_association_id"] = "" + ipv6set = subnet.get("ipv6_cidr_block_association_set") if ipv6set: for item in ipv6set: - if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'): - subnet['ipv6_cidr_block'] = item['ipv6_cidr_block'] - subnet['ipv6_association_id'] = item['association_id'] + if item.get("ipv6_cidr_block_state", {}).get("state") in ("associated", "associating"): + subnet["ipv6_cidr_block"] = item["ipv6_cidr_block"] + subnet["ipv6_association_id"] = item["association_id"] return subnet def waiter_params(module, params, start_time): - remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time()) - params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5} + remaining_wait_timeout = int(module.params["wait_timeout"] + start_time - time.time()) + params["WaiterConfig"] = {"Delay": 5, "MaxAttempts": remaining_wait_timeout // 5} return params def handle_waiter(conn, module, waiter_name, params, 
start_time): try: - get_waiter(conn, waiter_name).wait( - **waiter_params(module, params, start_time) - ) + get_waiter(conn, waiter_name).wait(**waiter_params(module, params, start_time)) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, "Failed to wait for updates to complete") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "An exception happened while trying to wait for updates") -def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None): - wait = module.params['wait'] +def create_subnet(conn, module, vpc_id, cidr, tags, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None): + wait = module.params["wait"] - params = dict(VpcId=vpc_id, - CidrBlock=cidr) + params = dict(VpcId=vpc_id, CidrBlock=cidr) if ipv6_cidr: - params['Ipv6CidrBlock'] = ipv6_cidr + params["Ipv6CidrBlock"] = ipv6_cidr if az: - params['AvailabilityZone'] = az + params["AvailabilityZone"] = az + + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="subnet") if outpost_arn: if is_outpost_arn(outpost_arn): - params['OutpostArn'] = outpost_arn + params["OutpostArn"] = outpost_arn else: - module.fail_json('OutpostArn does not match the pattern specified in API specifications.') + module.fail_json("OutpostArn does not match the pattern specified in API specifications.") try: subnet = get_subnet_info(conn.create_subnet(aws_retry=True, **params)) @@ -298,28 +295,32 @@ def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, # Sometimes AWS takes its time to create a subnet and so using # new subnets's id to do things like create tags results in # exception. - if wait and subnet.get('state') != 'available': - handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) - handle_waiter(conn, module, 'subnet_available', {'SubnetIds': [subnet['id']]}, start_time) - subnet['state'] = 'available' + if wait and subnet.get("state") != "available": + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]]}, start_time) + handle_waiter(conn, module, "subnet_available", {"SubnetIds": [subnet["id"]]}, start_time) + subnet["state"] = "available" return subnet def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): - changed = ensure_ec2_tags( - conn, module, subnet['id'], - resource_type='subnet', + conn, + module, + subnet["id"], + resource_type="subnet", purge_tags=purge_tags, tags=tags, - retry_codes=['InvalidSubnetID.NotFound']) + retry_codes=["InvalidSubnetID.NotFound"], + ) + + if not changed: + return changed - if module.params['wait'] and not module.check_mode: + if module.params["wait"] and not module.check_mode: # Wait for tags to be updated - filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()] - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags)) + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time) return changed @@ -328,8 +329,7 @@ def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time): if check_mode: return try: - conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], - MapPublicIpOnLaunch={'Value': map_public}) + conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet["id"], MapPublicIpOnLaunch={"Value": map_public}) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't modify subnet attribute") @@ -338,44 +338,46 @@ def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, ch if check_mode: return try: - conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], - AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6}) + conn.modify_subnet_attribute( + aws_retry=True, SubnetId=subnet["id"], AssignIpv6AddressOnCreation={"Value": assign_instances_ipv6} + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't modify subnet attribute") def disassociate_ipv6_cidr(conn, module, subnet, start_time): - if subnet.get('assign_ipv6_address_on_creation'): + if subnet.get("assign_ipv6_address_on_creation"): ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time) try: - conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet['ipv6_association_id']) + conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet["ipv6_association_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" - .format(subnet['ipv6_association_id'], subnet['id'])) + module.fail_json_aws( + e, + msg=f"Couldn't disassociate ipv6 cidr block id {subnet['ipv6_association_id']} from subnet {subnet['id']}", + ) # Wait for cidr block to be disassociated - if module.params['wait']: + if module.params["wait"]: filters = ansible_dict_to_boto3_filter_list( - {'ipv6-cidr-block-association.state': ['disassociated'], - 'vpc-id': subnet['vpc_id']} + {"ipv6-cidr-block-association.state": ["disassociated"], "vpc-id": subnet["vpc_id"]} ) - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time) def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time): - wait = module.params['wait'] + wait = module.params["wait"] changed = False - if subnet['ipv6_association_id'] and not ipv6_cidr: + if subnet["ipv6_association_id"] and not ipv6_cidr: if not check_mode: disassociate_ipv6_cidr(conn, module, subnet, start_time) changed = True if ipv6_cidr: - filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr, - 'vpc-id': subnet['vpc_id']}) + filters = ansible_dict_to_boto3_filter_list( + {"ipv6-cidr-block-association.ipv6-cidr-block": ipv6_cidr, "vpc-id": subnet["vpc_id"]} + ) try: _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) @@ -383,43 +385,52 @@ def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_ti except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get subnet info") - if check_subnets and check_subnets[0]['ipv6_cidr_block']: - module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + if check_subnets and check_subnets[0]["ipv6_cidr_block"]: + module.fail_json(msg=f"The IPv6 CIDR '{ipv6_cidr}' conflicts with another subnet") - if subnet['ipv6_association_id']: + if subnet["ipv6_association_id"]: if not check_mode: disassociate_ipv6_cidr(conn, module, subnet, start_time) changed = True try: if not check_mode: - associate_resp = 
conn.associate_subnet_cidr_block(aws_retry=True, SubnetId=subnet['id'], - Ipv6CidrBlock=ipv6_cidr) + associate_resp = conn.associate_subnet_cidr_block( + aws_retry=True, SubnetId=subnet["id"], Ipv6CidrBlock=ipv6_cidr + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id'])) + module.fail_json_aws(e, msg=f"Couldn't associate ipv6 cidr {ipv6_cidr} to {subnet['id']}") else: if not check_mode and wait: filters = ansible_dict_to_boto3_filter_list( - {'ipv6-cidr-block-association.state': ['associated'], - 'vpc-id': subnet['vpc_id']} + {"ipv6-cidr-block-association.state": ["associated"], "vpc-id": subnet["vpc_id"]} + ) + handle_waiter( + conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time + ) + + if associate_resp.get("Ipv6CidrBlockAssociation", {}).get("AssociationId"): + subnet["ipv6_association_id"] = associate_resp["Ipv6CidrBlockAssociation"]["AssociationId"] + subnet["ipv6_cidr_block"] = associate_resp["Ipv6CidrBlockAssociation"]["Ipv6CidrBlock"] + if subnet["ipv6_cidr_block_association_set"]: + subnet["ipv6_cidr_block_association_set"][0] = camel_dict_to_snake_dict( + associate_resp["Ipv6CidrBlockAssociation"] ) - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) - - if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'): - subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId'] - subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] - if subnet['ipv6_cidr_block_association_set']: - subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']) else: - subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])) + subnet["ipv6_cidr_block_association_set"].append( + camel_dict_to_snake_dict(associate_resp["Ipv6CidrBlockAssociation"]) + ) return changed +def _matching_subnet_filters(vpc_id, cidr): + return ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "cidr-block": cidr}) + + def get_matching_subnet(conn, module, vpc_id, cidr): - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr}) + filters = _matching_subnet_filters(vpc_id, cidr) try: _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) subnets = get_subnet_info(_subnets) @@ -433,7 +444,7 @@ def get_matching_subnet(conn, module, vpc_id, cidr): def ensure_subnet_present(conn, module): - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) changed = False # Initialize start so max time does not exceed the specified wait_timeout for multiple operations @@ -441,46 +452,53 @@ def ensure_subnet_present(conn, module): if subnet is None: if not module.check_mode: - subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'], - ipv6_cidr=module.params['ipv6_cidr'], outpost_arn=module.params['outpost_arn'], - az=module.params['az'], start_time=start_time) + subnet = create_subnet( + conn, + module, + module.params["vpc_id"], + module.params["cidr"], + module.params["tags"], + ipv6_cidr=module.params["ipv6_cidr"], + outpost_arn=module.params["outpost_arn"], + az=module.params["az"], + start_time=start_time, 
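+                # Tags are passed through so create_subnet() can build a
+                # TagSpecifications entry and the subnet is created with its tags
+                # already attached, instead of being tagged in a follow-up call.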
+ ) changed = True # Subnet will be None when check_mode is true if subnet is None: - return { - 'changed': changed, - 'subnet': {} - } - if module.params['wait']: - handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) - - if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): - if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + return {"changed": changed, "subnet": {}} + if module.params["wait"]: + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]]}, start_time) + + if module.params["ipv6_cidr"] != subnet.get("ipv6_cidr_block"): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params["ipv6_cidr"], module.check_mode, start_time): changed = True - if module.params['map_public'] != subnet['map_public_ip_on_launch']: - ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + if module.params["map_public"] != subnet["map_public_ip_on_launch"]: + ensure_map_public(conn, module, subnet, module.params["map_public"], module.check_mode, start_time) changed = True - if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): - ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + if module.params["assign_instances_ipv6"] != subnet.get("assign_ipv6_address_on_creation"): + ensure_assign_ipv6_on_create( + conn, module, subnet, module.params["assign_instances_ipv6"], module.check_mode, start_time + ) changed = True - if module.params['tags'] != subnet['tags']: - stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) - if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): - changed = True + if ensure_tags(conn, module, subnet, module.params["tags"], module.params["purge_tags"], start_time): + changed = True - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) - if not module.check_mode and module.params['wait']: + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) + if not module.check_mode and module.params["wait"]: + subnet_filter = _matching_subnet_filters(module.params["vpc_id"], module.params["cidr"]) + handle_waiter(conn, module, "subnet_exists", {"Filters": subnet_filter}, start_time) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) + if not subnet: + module.fail_json("Failed to describe newly created subnet") # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation # so we only wait for those if necessary just before returning the subnet subnet = ensure_final_subnet(conn, module, subnet, start_time) - return { - 'changed': changed, - 'subnet': subnet - } + return {"changed": changed, "subnet": subnet} def ensure_final_subnet(conn, module, subnet, start_time): @@ -488,42 +506,42 @@ def ensure_final_subnet(conn, module, subnet, start_time): map_public_correct = False assign_ipv6_correct = False - if module.params['map_public'] == subnet['map_public_ip_on_launch']: + if module.params["map_public"] == subnet["map_public_ip_on_launch"]: map_public_correct = True else: - if module.params['map_public']: - handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + if module.params["map_public"]: + handle_waiter(conn, module, 
"subnet_has_map_public", {"SubnetIds": [subnet["id"]]}, start_time) else: - handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + handle_waiter(conn, module, "subnet_no_map_public", {"SubnetIds": [subnet["id"]]}, start_time) - if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + if module.params["assign_instances_ipv6"] == subnet.get("assign_ipv6_address_on_creation"): assign_ipv6_correct = True else: - if module.params['assign_instances_ipv6']: - handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + if module.params["assign_instances_ipv6"]: + handle_waiter(conn, module, "subnet_has_assign_ipv6", {"SubnetIds": [subnet["id"]]}, start_time) else: - handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + handle_waiter(conn, module, "subnet_no_assign_ipv6", {"SubnetIds": [subnet["id"]]}, start_time) if map_public_correct and assign_ipv6_correct: break time.sleep(5) - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) return subnet def ensure_subnet_absent(conn, module): - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) if subnet is None: - return {'changed': False} + return {"changed": False} try: if not module.check_mode: - conn.delete_subnet(aws_retry=True, SubnetId=subnet['id']) - if module.params['wait']: - handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) - return {'changed': True} + conn.delete_subnet(aws_retry=True, SubnetId=subnet["id"]) + if module.params["wait"]: + handle_waiter(conn, module, "subnet_deleted", {"SubnetIds": [subnet["id"]]}, time.time()) + return {"changed": True} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete subnet") @@ -532,37 +550,37 @@ def main(): argument_spec = dict( az=dict(default=None, required=False), cidr=dict(required=True), - ipv6_cidr=dict(default='', required=False), - outpost_arn=dict(default='', type='str', required=False), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + ipv6_cidr=dict(default="", required=False), + outpost_arn=dict(default="", type="str", required=False), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), vpc_id=dict(required=True), - map_public=dict(default=False, required=False, type='bool'), - assign_instances_ipv6=dict(default=False, required=False, type='bool'), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300, required=False), - purge_tags=dict(default=True, type='bool') + map_public=dict(default=False, required=False, type="bool"), + assign_instances_ipv6=dict(default=False, required=False, type="bool"), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300, required=False), + purge_tags=dict(default=True, type="bool"), ) - required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + required_if = [("assign_instances_ipv6", True, ["ipv6_cidr"])] module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, 
required_if=required_if) - if module.params.get('outpost_arn') and not module.params.get('az'): + if module.params.get("outpost_arn") and not module.params.get("az"): module.fail_json(msg="To specify OutpostArn, you must specify the Availability Zone of the Outpost subnet.") - if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + if module.params.get("assign_instances_ipv6") and not module.params.get("ipv6_cidr"): module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) - state = module.params.get('state') + state = module.params.get("state") try: - if state == 'present': + if state == "present": result = ensure_subnet_present(connection, module) - elif state == 'absent': + elif state == "absent": result = ensure_subnet_absent(connection, module) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) @@ -570,5 +588,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py index bbf1b976a..654f5609a 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_subnet_info version_added: 1.0.0 @@ -29,12 +27,12 @@ options: type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all VPC subnets @@ -70,9 +68,9 @@ EXAMPLES = ''' - set_fact: subnet_ids: "{{ subnet_info.results | sum(attribute='subnets', start=[]) | map(attribute='subnet_id') }}" -''' +""" -RETURN = ''' +RETURN = r""" subnets: description: Returns an array of complex objects as described below. returned: success @@ -144,7 +142,7 @@ subnets: description: The CIDR block association state. 
returned: always type: str -''' +""" try: import botocore @@ -153,10 +151,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.exponential_backoff() @@ -179,8 +177,8 @@ def describe_subnets(connection, module): connection : boto3 client connection object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - subnet_ids = module.params.get('subnet_ids') + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + subnet_ids = module.params.get("subnet_ids") if subnet_ids is None: # Set subnet_ids to empty list if it is None @@ -193,33 +191,30 @@ def describe_subnets(connection, module): try: response = describe_subnets_with_backoff(connection, subnet_ids, filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe subnets') + module.fail_json_aws(e, msg="Failed to describe subnets") - for subnet in response['Subnets']: + for subnet in response["Subnets"]: # for backwards compatibility - subnet['id'] = subnet['SubnetId'] + subnet["id"] = subnet["SubnetId"] subnet_info.append(camel_dict_to_snake_dict(subnet)) # convert tag list to ansible dict - subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', [])) + subnet_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(subnet.get("Tags", [])) module.exit_json(subnets=subnet_info) def main(): argument_spec = dict( - subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']), - filters=dict(type='dict', default={}) + subnet_ids=dict(type="list", elements="str", default=[], aliases=["subnet_id"]), + filters=dict(type="dict", default={}), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client("ec2") describe_subnets(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py index 6f9cd1c86..ac3bb3642 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py @@ -1,24 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: elb_application_lb version_added: 5.0.0 @@ -223,17 +209,17 @@ options: version_added: 3.2.0 version_added_collection: community.aws extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ALB and attach a listener @@ -348,10 +334,9 @@ EXAMPLES = r''' - amazon.aws.elb_application_lb: name: myalb state: absent +""" -''' - -RETURN = r''' +RETURN = r""" access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. returned: when state is present @@ -534,49 +519,49 @@ waf_fail_open_enabled: returned: when state is present type: bool sample: false -''' +""" + try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( - ApplicationLoadBalancer, - ELBListener, - ELBListenerRule, - ELBListenerRules, - ELBListeners, -) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ApplicationLoadBalancer +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListenerRule +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListenerRules +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import 
compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def describe_sgs_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_security_groups') - return paginator.paginate(**params).build_full_result()['SecurityGroups'] + paginator = connection.get_paginator("describe_security_groups") + return paginator.paginate(**params).build_full_result()["SecurityGroups"] def find_default_sg(connection, module, vpc_id): """ Finds the default security group for the given VPC ID. """ - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "group-name": "default"}) try: sg = describe_sgs_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id)) + module.fail_json_aws(e, msg=f"No default security group found for VPC {vpc_id}") if len(sg) == 1: - return sg[0]['GroupId'] + return sg[0]["GroupId"] elif len(sg) == 0: - module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"No default security group found for VPC {vpc_id}") else: - module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f'Multiple security groups named "default" found for VPC {vpc_id}') def create_or_update_alb(alb_obj): @@ -586,31 +571,33 @@ def create_or_update_alb(alb_obj): # Subnets if not alb_obj.compare_subnets(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.modify_subnets() # Security Groups if not alb_obj.compare_security_groups(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.modify_security_groups() # ALB attributes if not alb_obj.compare_elb_attributes(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.update_elb_attributes() alb_obj.modify_elb_attributes() # Tags - only need to play with tags if tags parameter has been set to something if alb_obj.tags is not None: - - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(alb_obj.elb["tags"]), + boto3_tag_list_to_ansible_dict(alb_obj.tags), + alb_obj.purge_tags, + ) # Exit on check_mode if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") # Delete necessary tags if tags_to_delete: @@ -623,7 +610,7 @@ def create_or_update_alb(alb_obj): else: # Create load balancer if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have 
created ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have created ALB if not in check mode.") alb_obj.create_elb() # Add ALB attributes @@ -631,28 +618,32 @@ def create_or_update_alb(alb_obj): alb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() # Exit on check_mode if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb["LoadBalancerArn"]) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.modify() listeners_obj.changed = True @@ -662,18 +653,32 @@ def create_or_update_alb(alb_obj): # Rules of each listener for listener in listeners_obj.listeners: - if 'Rules' in listener: - rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) - rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() + if "Rules" in listener: + rules_obj = ELBListenerRules( + alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"], listener["Rules"], listener["Port"] + ) + rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority = rules_obj.compare_rules() # Exit on check_mode - if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + if alb_obj.module.check_mode and ( + rules_to_add or rules_to_modify or rules_to_delete or rules_to_set_priority + ): + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") + + # Set rules priorities + if rules_to_set_priority: + rule_obj = ELBListenerRule( + alb_obj.connection, alb_obj.module, rules_to_set_priority, rules_obj.listener_arn + ) + rule_obj.set_rule_priorities() + alb_obj.changed |= rule_obj.changed # Delete rules - if alb_obj.module.params['purge_rules']: + if alb_obj.module.params["purge_rules"]: for rule in rules_to_delete: - rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) + rule_obj = ELBListenerRule( + alb_obj.connection, alb_obj.module, {"RuleArn": rule}, 
rules_obj.listener_arn + ) rule_obj.delete() alb_obj.changed = True @@ -690,16 +695,18 @@ def create_or_update_alb(alb_obj): alb_obj.changed = True # Update ALB ip address type only if option has been provided - if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): + if alb_obj.module.params.get("ip_address_type") and alb_obj.elb_ip_addr_type != alb_obj.module.params.get( + "ip_address_type" + ): # Exit on check_mode if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") - alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) + alb_obj.modify_ip_address_type(alb_obj.module.params.get("ip_address_type")) # Exit on check_mode - no changes if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') + alb_obj.module.exit_json(changed=False, msg="IN CHECK MODE - no changes to make to ALB specified.") # Get the ALB again alb_obj.update() @@ -713,123 +720,119 @@ def create_or_update_alb(alb_obj): # Convert to snake_case and merge in everything we want to return to the user snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) - snaked_alb['listeners'] = [] + snaked_alb["listeners"] = [] for listener in listeners_obj.current_listeners: # For each listener, get listener rules - listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) - snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) + listener["rules"] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener["ListenerArn"]) + snaked_alb["listeners"].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) + snaked_alb["tags"] = boto3_tag_list_to_ansible_dict(snaked_alb["tags"]) # ip address type - snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() + snaked_alb["ip_address_type"] = alb_obj.get_elb_ip_address_type() alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) def delete_alb(alb_obj): - if alb_obj.elb: - # Exit on check_mode if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have deleted ALB if not in check mode.") - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) - for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"]) + for listener_to_delete in [i["ListenerArn"] for i in listeners_obj.current_listeners]: + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() alb_obj.delete() else: - # Exit on check_mode - no changes if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') + alb_obj.module.exit_json(changed=False, msg="IN CHECK MODE - ALB already absent.") 
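     # Fall through for both branches: report the accumulated changed state,
     # whether the ALB was deleted above or was already absent.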
alb_obj.module.exit_json(changed=alb_obj.changed) def main(): - argument_spec = dict( - access_logs_enabled=dict(type='bool'), - access_logs_s3_bucket=dict(type='str'), - access_logs_s3_prefix=dict(type='str'), - deletion_protection=dict(type='bool'), - http2=dict(type='bool'), - http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']), - http_drop_invalid_header_fields=dict(type='bool'), - http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'), - http_xff_client_port=dict(type='bool'), - idle_timeout=dict(type='int'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - DefaultActions=dict(type='list', required=True, elements='dict'), - Rules=dict(type='list', elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - waf_fail_open=dict(type='bool'), - wait_timeout=dict(type='int'), - wait=dict(default=False, type='bool'), - purge_rules=dict(default=True, type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) + access_logs_enabled=dict(type="bool"), + access_logs_s3_bucket=dict(type="str"), + access_logs_s3_prefix=dict(type="str"), + deletion_protection=dict(type="bool"), + http2=dict(type="bool"), + http_desync_mitigation_mode=dict(type="str", choices=["monitor", "defensive", "strictest"]), + http_drop_invalid_header_fields=dict(type="bool"), + http_x_amzn_tls_version_and_cipher_suite=dict(type="bool"), + http_xff_client_port=dict(type="bool"), + idle_timeout=dict(type="int"), + listeners=dict( + type="list", + elements="dict", + options=dict( + Protocol=dict(type="str", required=True), + Port=dict(type="int", required=True), + SslPolicy=dict(type="str"), + Certificates=dict(type="list", elements="dict"), + DefaultActions=dict(type="list", required=True, elements="dict"), + Rules=dict(type="list", elements="dict"), + ), + ), + name=dict(required=True, type="str"), + purge_listeners=dict(default=True, type="bool"), + purge_tags=dict(default=True, type="bool"), + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), + state=dict(choices=["present", "absent"], default="present"), + tags=dict(type="dict", aliases=["resource_tags"]), + waf_fail_open=dict(type="bool"), + wait_timeout=dict(type="int"), + wait=dict(default=False, type="bool"), + purge_rules=dict(default=True, type="bool"), + ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['subnets', 'security_groups']) - ], - required_together=[ - ['access_logs_enabled', 'access_logs_s3_bucket'] - ], - supports_check_mode=True, - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["subnets", "security_groups"])], + required_together=[["access_logs_enabled", "access_logs_s3_bucket"]], + supports_check_mode=True, + ) # Quick check 
of listeners parameters listeners = module.params.get("listeners") if listeners is not None: for listener in listeners: for key in listener.keys(): - if key == 'Protocol' and listener[key] == 'HTTPS': - if listener.get('SslPolicy') is None: + if key == "Protocol" and listener[key] == "HTTPS": + if listener.get("SslPolicy") is None: module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS") - if listener.get('Certificates') is None: + if listener.get("Certificates") is None: module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS") - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') + connection = module.client("elbv2") + connection_ec2 = module.client("ec2") state = module.params.get("state") alb = ApplicationLoadBalancer(connection, connection_ec2, module) # Update security group if default is specified - if alb.elb and module.params.get('security_groups') == []: - module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])] + if alb.elb and module.params.get("security_groups") == []: + module.params["security_groups"] = [find_default_sg(connection_ec2, module, alb.elb["VpcId"])] alb = ApplicationLoadBalancer(connection, connection_ec2, module) - if state == 'present': + if state == "present": create_or_update_alb(alb) - elif state == 'absent': + elif state == "absent": delete_alb(alb) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py index 42ad25a85..cc342dc0d 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_application_lb_info version_added: 5.0.0 @@ -29,20 +27,48 @@ options: required: false type: list elements: str + include_attributes: + description: + - Whether or not to include load balancer attributes in the response. + required: false + type: bool + default: true + version_added: 7.0.0 + include_listeners: + description: + - Whether or not to include load balancer listeners in the response. + required: false + type: bool + default: true + version_added: 7.0.0 + include_listener_rules: + description: + - Whether or not to include load balancer listener rules in the response. + - Implies I(include_listeners=true) + required: false + type: bool + default: true + version_added: 7.0.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Gather information about all ALBs amazon.aws.elb_application_lb_info: +# Equivalent to aws elbv2 describe-load-balancers +- name: Gather minimal information about all ALBs + amazon.aws.elb_application_lb_info: + include_attributes: false + include_listeners: false + include_listener_rules: false + - name: Gather information about a particular ALB given its ARN amazon.aws.elb_application_lb_info: load_balancer_arns: @@ -61,9 +87,9 @@ EXAMPLES = r''' register: alb_info - ansible.builtin.debug: var: alb_info -''' +""" -RETURN = r''' +RETURN = r""" load_balancers: description: a list of load balancers returned: always @@ -71,14 +97,17 @@ load_balancers: contains: access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. + returned: when include_attributes is true type: str sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. + returned: when include_attributes is true type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. + returned: when include_attributes is true type: str sample: "my/logs" availability_zones: @@ -95,6 +124,7 @@ load_balancers: sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. + returned: when include_attributes is true type: bool sample: true dns_name: @@ -103,6 +133,7 @@ load_balancers: sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. + returned: when include_attributes is true type: int sample: 60 ip_address_type: @@ -111,6 +142,7 @@ load_balancers: sample: "ipv4" listeners: description: Information about the listeners. + returned: when include_listeners or include_listener_rules is true type: complex contains: listener_arn: @@ -129,6 +161,11 @@ load_balancers: description: The protocol for connections from clients to the load balancer. type: str sample: "HTTPS" + rules: + description: List of listener rules. + returned: when include_listener_rules is true + type: list + sample: "" certificates: description: The SSL server certificate. type: complex @@ -161,24 +198,34 @@ load_balancers: description: The name of the load balancer. type: str sample: "my-alb" + load_balancing_cross_zone_enabled: + description: Indicates whether or not cross-zone load balancing is enabled. + returned: when include_attributes is true + type: bool + sample: true routing_http2_enabled: description: Indicates whether HTTP/2 is enabled. + returned: when include_attributes is true type: bool sample: true routing_http_desync_mitigation_mode: description: Determines how the load balancer handles requests that might pose a security risk to an application. + returned: when include_attributes is true type: str sample: "defensive" routing_http_drop_invalid_header_fields_enabled: description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + returned: when include_attributes is true type: bool sample: false routing_http_x_amzn_tls_version_and_cipher_suite_enabled: description: Indicates whether the two headers are added to the client request before sending it to the target. 
+ returned: when include_attributes is true type: bool sample: false routing_http_xff_client_port_enabled: description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + returned: when include_attributes is true type: bool sample: false scheme: @@ -210,9 +257,10 @@ load_balancers: waf_fail_open_enabled: description: Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + returned: when include_attributes is true type: bool sample: false -''' +""" try: import botocore @@ -221,66 +269,73 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict @AWSRetry.jittered_backoff(retries=10) def get_paginator(connection, **kwargs): - paginator = connection.get_paginator('describe_load_balancers') + paginator = connection.get_paginator("describe_load_balancers") return paginator.paginate(**kwargs).build_full_result() def get_alb_listeners(connection, module, alb_arn): - try: - return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] + return connection.describe_listeners( + aws_retry=True, + LoadBalancerArn=alb_arn, + )["Listeners"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe alb listeners") def get_listener_rules(connection, module, listener_arn): - try: - return connection.describe_rules(ListenerArn=listener_arn)['Rules'] + return connection.describe_rules( + aws_retry=True, + ListenerArn=listener_arn, + )["Rules"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe listener rules") def get_load_balancer_attributes(connection, module, load_balancer_arn): - try: - load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) + attributes = connection.describe_load_balancer_attributes( + aws_retry=True, + LoadBalancerArn=load_balancer_arn, + )["Attributes"] + load_balancer_attributes = boto3_tag_list_to_ansible_dict(attributes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley for k, v in list(load_balancer_attributes.items()): - load_balancer_attributes[k.replace('.', '_')] = v + load_balancer_attributes[k.replace(".", "_")] = v del load_balancer_attributes[k] return load_balancer_attributes def get_load_balancer_tags(connection, module, load_balancer_arn): - try: - return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) + tag_descriptions = connection.describe_tags( + aws_retry=True, + ResourceArns=[load_balancer_arn], + )["TagDescriptions"] + return boto3_tag_list_to_ansible_dict(tag_descriptions[0]["Tags"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer tags") -def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): - try: - return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer ip address type") - - def list_load_balancers(connection, module): load_balancer_arns = module.params.get("load_balancer_arns") names = module.params.get("names") + include_attributes = module.params.get("include_attributes") + include_listeners = module.params.get("include_listeners") + include_listener_rules = module.params.get("include_listener_rules") try: if not load_balancer_arns and not names: @@ -289,55 +344,64 @@ def list_load_balancers(connection, module): load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns) if names: load_balancers = get_paginator(connection, Names=names) - except is_boto3_error_code('LoadBalancerNotFound'): + except is_boto3_error_code("LoadBalancerNotFound"): module.exit_json(load_balancers=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list load balancers") - for load_balancer in load_balancers['LoadBalancers']: + for load_balancer in load_balancers["LoadBalancers"]: # Get the attributes for each alb - load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn'])) + if include_attributes: + load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer["LoadBalancerArn"])) # Get the listeners for each alb - load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) + if include_listeners or include_listener_rules: + load_balancer["listeners"] = get_alb_listeners(connection, module, load_balancer["LoadBalancerArn"]) # For each listener, get listener rules - for listener in load_balancer['listeners']: - listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) - - # Get ALB ip address type - load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) + if include_listener_rules: + for listener in load_balancer["listeners"]: + listener["rules"] = get_listener_rules(connection, module, listener["ListenerArn"]) # Turn the boto3 result in to ansible_friendly_snaked_names - snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in 
load_balancers['LoadBalancers']] + snaked_load_balancers = [ + camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers["LoadBalancers"] + ] # Get tags for each load balancer for snaked_load_balancer in snaked_load_balancers: - snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn']) + snaked_load_balancer["tags"] = get_load_balancer_tags( + connection, module, snaked_load_balancer["load_balancer_arn"] + ) module.exit_json(load_balancers=snaked_load_balancers) def main(): - argument_spec = dict( - load_balancer_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str') + load_balancer_arns=dict(type="list", elements="str"), + names=dict(type="list", elements="str"), + include_attributes=dict(default=True, type="bool"), + include_listeners=dict(default=True, type="bool"), + include_listener_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arns', 'names']], + mutually_exclusive=[["load_balancer_arns", "names"]], supports_check_mode=True, ) try: - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_load_balancers(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py index 5d49d92f6..4008b8029 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_classic_lb version_added: 1.0.0 @@ -282,13 +280,13 @@ notes: - Support for I(purge_tags) was added in release 2.1.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. 
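A recurring pattern in the modules touched by this commit: boto3 clients are wrapped with the collection's AWSRetry.jittered_backoff decorator (surfaced as aws_retry=True on each call) and paginated describe calls are drained with build_full_result(), while the new include_attributes/include_listeners/include_listener_rules flags in elb_application_lb_info let callers skip the extra per-ALB API round-trips. Outside Ansible, plain boto3 can approximate the retry-plus-pagination behaviour; the sketch below is illustrative only, and the function name and retry settings are assumptions rather than part of this commit:

    # Minimal standalone sketch, not code from this commit. Requires boto3 and
    # configured AWS credentials; botocore's built-in "adaptive" retry mode
    # stands in here for the collection's AWSRetry.jittered_backoff decorator.
    import boto3
    from botocore.config import Config

    def describe_all_load_balancers(names=None):
        client = boto3.client(
            "elbv2",
            config=Config(retries={"max_attempts": 10, "mode": "adaptive"}),
        )
        paginator = client.get_paginator("describe_load_balancers")
        kwargs = {"Names": names} if names else {}
        # build_full_result() merges every page into a single result dict,
        # mirroring the get_paginator() helper in the module above.
        return paginator.paginate(**kwargs).build_full_result()["LoadBalancers"]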
@@ -304,7 +302,7 @@ EXAMPLES = """ - protocol: http # options are http, https, ssl, tcp load_balancer_port: 80 instance_port: 80 - proxy_protocol: True + proxy_protocol: true - protocol: https load_balancer_port: 443 instance_protocol: http # optional, defaults to value of protocol setting @@ -340,17 +338,17 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - healthy_threshold: 10 + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 access_logs: - interval: 5 # minutes (defaults to 60) - s3_location: "my-bucket" # This value is required if access_logs is set - s3_prefix: "logs" + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" # Ensure ELB is gone - amazon.aws.elb_classic_lb: @@ -490,7 +488,7 @@ EXAMPLES = """ tags: {} """ -RETURN = ''' +RETURN = r""" elb: description: Load Balancer attributes returned: always @@ -670,73 +668,72 @@ elb: elements: str sample: ['us-east-1b', 'us-east-1a'] returned: when state is not 'absent' -''' +""" try: import botocore except ImportError: pass # Taken care of by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from 
ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -class ElbManager(object): +class ElbManager: """Handles ELB creation and destruction""" def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.listeners = module.params['listeners'] - self.purge_listeners = module.params['purge_listeners'] - self.instance_ids = module.params['instance_ids'] - self.purge_instance_ids = module.params['purge_instance_ids'] - self.zones = module.params['zones'] - self.purge_zones = module.params['purge_zones'] - self.health_check = module.params['health_check'] - self.access_logs = module.params['access_logs'] - self.subnets = module.params['subnets'] - self.purge_subnets = module.params['purge_subnets'] - self.scheme = module.params['scheme'] - self.connection_draining_timeout = module.params['connection_draining_timeout'] - self.idle_timeout = module.params['idle_timeout'] - self.cross_az_load_balancing = module.params['cross_az_load_balancing'] - self.stickiness = module.params['stickiness'] - self.wait = module.params['wait'] - self.wait_timeout = module.params['wait_timeout'] - self.tags = module.params['tags'] - self.purge_tags = module.params['purge_tags'] + self.name = module.params["name"] + self.listeners = module.params["listeners"] + self.purge_listeners = module.params["purge_listeners"] + self.instance_ids = module.params["instance_ids"] + self.purge_instance_ids = module.params["purge_instance_ids"] + self.zones = module.params["zones"] + self.purge_zones = module.params["purge_zones"] + self.health_check = module.params["health_check"] + self.access_logs = module.params["access_logs"] + self.subnets = module.params["subnets"] + self.purge_subnets = module.params["purge_subnets"] + self.scheme = module.params["scheme"] + self.connection_draining_timeout = module.params["connection_draining_timeout"] + self.idle_timeout = module.params["idle_timeout"] + self.cross_az_load_balancing = module.params["cross_az_load_balancing"] + self.stickiness = module.params["stickiness"] + self.wait = module.params["wait"] + self.wait_timeout = module.params["wait_timeout"] + self.tags = module.params["tags"] + self.purge_tags = module.params["purge_tags"] self.changed = False - self.status = 'gone' + self.status = "gone" retry_decorator = AWSRetry.jittered_backoff() - self.client = self.module.client('elb', retry_decorator=retry_decorator) - self.ec2_client = self.module.client('ec2', retry_decorator=retry_decorator) + self.client = self.module.client("elb", retry_decorator=retry_decorator) + self.ec2_client = self.module.client("ec2", retry_decorator=retry_decorator) - security_group_names = module.params['security_group_names'] - self.security_group_ids = module.params['security_group_ids'] + security_group_names = module.params["security_group_names"] + self.security_group_ids = module.params["security_group_ids"] self._update_descriptions() if security_group_names: # Use the subnets attached to the VPC to find which VPC we're in and # limit the search - if self.elb and self.elb.get('Subnets', None): - subnets = set(self.elb.get('Subnets') + list(self.subnets or [])) + if self.elb and self.elb.get("Subnets", None): + subnets = set(self.elb.get("Subnets") + list(self.subnets or [])) else: subnets = set(self.subnets) if subnets: @@ -745,27 +742,31 @@ class ElbManager(object): vpc_id = None try: self.security_group_ids = self._get_ec2_security_group_ids_from_names( - sec_group_list=security_group_names, vpc_id=vpc_id) + 
sec_group_list=security_group_names, vpc_id=vpc_id + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to convert security group names to IDs, try using security group IDs rather than names") + module.fail_json_aws( + e, + msg="Failed to convert security group names to IDs, try using security group IDs rather than names", + ) def _update_descriptions(self): try: self.elb = self._get_elb() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer') + self.module.fail_json_aws(e, msg="Unable to describe load balancer") try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") try: self.elb_policies = self._get_elb_policies() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer policies') + self.module.fail_json_aws(e, msg="Unable to describe load balancer policies") try: self.elb_health = self._get_elb_instance_health() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer instance health') + self.module.fail_json_aws(e, msg="Unable to describe load balancer instance health") # We have a number of complex parameters which can't be validated by # AnsibleModule or are only required if the ELB doesn't exist. @@ -775,7 +776,7 @@ class ElbManager(object): problem_found |= self._validate_listeners(self.listeners) problem_found |= self._validate_health_check(self.health_check) problem_found |= self._validate_stickiness(self.stickiness) - if state == 'present': + if state == "present": # When creating a new ELB problem_found |= self._validate_creation_requirements() problem_found |= self._validate_access_logs(self.access_logs) @@ -788,50 +789,50 @@ class ElbManager(object): def _get_elb_policies(self): try: attributes = self.client.describe_load_balancer_policies(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return {} - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe load balancer policies') + self.module.warn("Access Denied trying to describe load balancer policies") return {} - return attributes['PolicyDescriptions'] + return attributes["PolicyDescriptions"] def _get_elb_instance_health(self): try: instance_health = self.client.describe_instance_health(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return [] - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving 
if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe instance health') + self.module.warn("Access Denied trying to describe instance health") return [] - return instance_health['InstanceStates'] + return instance_health["InstanceStates"] def _get_elb_attributes(self): try: attributes = self.client.describe_load_balancer_attributes(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return {} - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe load balancer attributes') + self.module.warn("Access Denied trying to describe load balancer attributes") return {} - return attributes['LoadBalancerAttributes'] + return attributes["LoadBalancerAttributes"] def _get_elb(self): try: elbs = self._describe_loadbalancer(self.name) - except is_boto3_error_code('LoadBalancerNotFound'): + except is_boto3_error_code("LoadBalancerNotFound"): return None # Shouldn't happen, but Amazon could change the rules on us... if len(elbs) > 1: - self.module.fail_json('Found multiple ELBs with name {0}'.format(self.name)) + self.module.fail_json(f"Found multiple ELBs with name {self.name}") - self.status = 'exists' if self.status == 'gone' else self.status + self.status = "exists" if self.status == "gone" else self.status return elbs[0] @@ -841,32 +842,33 @@ class ElbManager(object): if not self.check_mode: self.client.delete_load_balancer(aws_retry=True, LoadBalancerName=self.name) self.changed = True - self.status = 'deleted' - except is_boto3_error_code('LoadBalancerNotFound'): + self.status = "deleted" + except is_boto3_error_code("LoadBalancerNotFound"): return False return True def _create_elb(self): listeners = list(self._format_listener(l) for l in self.listeners) if not self.scheme: - self.scheme = 'internet-facing' + self.scheme = "internet-facing" params = dict( LoadBalancerName=self.name, AvailabilityZones=self.zones, SecurityGroups=self.security_group_ids, Subnets=self.subnets, Listeners=listeners, - Scheme=self.scheme) + Scheme=self.scheme, + ) params = scrub_none_parameters(params) if self.tags: - params['Tags'] = ansible_dict_to_boto3_tag_list(self.tags) + params["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) if not self.check_mode: self.client.create_load_balancer(aws_retry=True, **params) # create_load_balancer only returns the DNS name self.elb = self._get_elb() self.changed = True - self.status = 'created' + self.status = "created" return True def _format_listener(self, listener, inject_protocol=False): @@ -875,41 +877,41 @@ class ElbManager(object): listener = scrub_none_parameters(listener) - for protocol in ['protocol', 'instance_protocol']: + for protocol in ["protocol", "instance_protocol"]: if protocol in listener: listener[protocol] = listener[protocol].upper() - if inject_protocol and 'instance_protocol' not in listener: - listener['instance_protocol'] = listener['protocol'] + if inject_protocol and "instance_protocol" not in listener: + listener["instance_protocol"] = listener["protocol"] # Remove proxy_protocol, it has to be handled as a 
policy - listener.pop('proxy_protocol', None) + listener.pop("proxy_protocol", None) - ssl_id = listener.pop('ssl_certificate_id', None) + ssl_id = listener.pop("ssl_certificate_id", None) formatted_listener = snake_dict_to_camel_dict(listener, True) if ssl_id: - formatted_listener['SSLCertificateId'] = ssl_id + formatted_listener["SSLCertificateId"] = ssl_id return formatted_listener def _format_healthcheck_target(self): """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() + protocol = self.health_check["ping_protocol"].upper() path = "" - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] + if protocol in ["HTTP", "HTTPS"] and "ping_path" in self.health_check: + path = self.health_check["ping_path"] - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) + return f"{protocol}:{self.health_check['ping_port']}{path}" def _format_healthcheck(self): return dict( Target=self._format_healthcheck_target(), - Timeout=self.health_check['timeout'], - Interval=self.health_check['interval'], - UnhealthyThreshold=self.health_check['unhealthy_threshold'], - HealthyThreshold=self.health_check['healthy_threshold'], + Timeout=self.health_check["timeout"], + Interval=self.health_check["interval"], + UnhealthyThreshold=self.health_check["unhealthy_threshold"], + HealthyThreshold=self.health_check["healthy_threshold"], ) def ensure_ok(self): @@ -922,7 +924,7 @@ class ElbManager(object): try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") self._wait_created() # Some attributes are configured on creation, others need to be updated @@ -943,7 +945,7 @@ class ElbManager(object): try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") else: self._set_subnets() self._set_zones() @@ -957,8 +959,8 @@ class ElbManager(object): self._set_stickiness_policies() self._set_instance_ids() -# if self._check_attribute_support('access_log'): -# self._set_access_log() + # if self._check_attribute_support('access_log'): + # self._set_access_log() def ensure_gone(self): """Destroy the ELB""" @@ -997,11 +999,11 @@ class ElbManager(object): if not elb: return {} - elb['LoadBalancerAttributes'] = self.elb_attributes - elb['LoadBalancerPolicies'] = self.elb_policies + elb["LoadBalancerAttributes"] = self.elb_attributes + elb["LoadBalancerPolicies"] = self.elb_policies load_balancer = camel_dict_to_snake_dict(elb) try: - load_balancer['tags'] = self._get_tags() + load_balancer["tags"] = self._get_tags() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") @@ -1011,40 +1013,36 @@ class ElbManager(object): self._update_descriptions() if not self.elb: - return dict( - name=self.name, - status=self.status, - region=self.module.region - ) + return dict(name=self.name, status=self.status, region=self.module.region) check_elb = dict(self.elb) check_elb_attrs = dict(self.elb_attributes or {}) - check_policies = 
check_elb.get('Policies', {}) + check_policies = check_elb.get("Policies", {}) try: - lb_cookie_policy = check_policies['LBCookieStickinessPolicies'][0]['PolicyName'] + lb_cookie_policy = check_policies["LBCookieStickinessPolicies"][0]["PolicyName"] except (KeyError, IndexError): lb_cookie_policy = None try: - app_cookie_policy = check_policies['AppCookieStickinessPolicies'][0]['PolicyName'] + app_cookie_policy = check_policies["AppCookieStickinessPolicies"][0]["PolicyName"] except (KeyError, IndexError): app_cookie_policy = None - health_check = camel_dict_to_snake_dict(check_elb.get('HealthCheck', {})) + health_check = camel_dict_to_snake_dict(check_elb.get("HealthCheck", {})) backend_policies = list() for port, policies in self._get_backend_policies().items(): for policy in policies: - backend_policies.append("{0}:{1}".format(port, policy)) + backend_policies.append(f"{port}:{policy}") info = dict( - name=check_elb.get('LoadBalancerName'), - dns_name=check_elb.get('DNSName'), - zones=check_elb.get('AvailabilityZones'), - security_group_ids=check_elb.get('SecurityGroups'), + name=check_elb.get("LoadBalancerName"), + dns_name=check_elb.get("DNSName"), + zones=check_elb.get("AvailabilityZones"), + security_group_ids=check_elb.get("SecurityGroups"), status=self.status, - subnets=check_elb.get('Subnets'), - scheme=check_elb.get('Scheme'), - hosted_zone_name=check_elb.get('CanonicalHostedZoneName'), - hosted_zone_id=check_elb.get('CanonicalHostedZoneNameID'), + subnets=check_elb.get("Subnets"), + scheme=check_elb.get("Scheme"), + hosted_zone_name=check_elb.get("CanonicalHostedZoneName"), + hosted_zone_id=check_elb.get("CanonicalHostedZoneNameID"), lb_cookie_policy=lb_cookie_policy, app_cookie_policy=app_cookie_policy, proxy_policy=self._get_proxy_protocol_policy(), @@ -1061,41 +1059,39 @@ class ElbManager(object): info.update(instance_health) # instance state counts: InService or OutOfService - if info['instance_health']: - for instance_state in info['instance_health']: - if instance_state['state'] == "InService": - info['in_service_count'] += 1 - elif instance_state['state'] == "OutOfService": - info['out_of_service_count'] += 1 + if info["instance_health"]: + for instance_state in info["instance_health"]: + if instance_state["state"] == "InService": + info["in_service_count"] += 1 + elif instance_state["state"] == "OutOfService": + info["out_of_service_count"] += 1 else: - info['unknown_instance_state_count'] += 1 + info["unknown_instance_state_count"] += 1 - listeners = check_elb.get('ListenerDescriptions', []) + listeners = check_elb.get("ListenerDescriptions", []) if listeners: - info['listeners'] = list( - self._api_listener_as_tuple(l['Listener']) for l in listeners - ) + info["listeners"] = list(self._api_listener_as_tuple(l["Listener"]) for l in listeners) else: - info['listeners'] = [] + info["listeners"] = [] try: - info['connection_draining_timeout'] = check_elb_attrs['ConnectionDraining']['Timeout'] + info["connection_draining_timeout"] = check_elb_attrs["ConnectionDraining"]["Timeout"] except KeyError: pass try: - info['idle_timeout'] = check_elb_attrs['ConnectionSettings']['IdleTimeout'] + info["idle_timeout"] = check_elb_attrs["ConnectionSettings"]["IdleTimeout"] except KeyError: pass try: - is_enabled = check_elb_attrs['CrossZoneLoadBalancing']['Enabled'] - info['cross_az_load_balancing'] = 'yes' if is_enabled else 'no' + is_enabled = check_elb_attrs["CrossZoneLoadBalancing"]["Enabled"] + info["cross_az_load_balancing"] = "yes" if is_enabled else "no" except KeyError: pass # 
# return stickiness info? try: - info['tags'] = self._get_tags() + info["tags"] = self._get_tags() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") @@ -1104,14 +1100,14 @@ class ElbManager(object): @property def _waiter_config(self): delay = min(10, self.wait_timeout) - max_attempts = (self.wait_timeout // delay) - return {'Delay': delay, 'MaxAttempts': max_attempts} + max_attempts = self.wait_timeout // delay + return {"Delay": delay, "MaxAttempts": max_attempts} def _wait_for_elb_created(self): if self.check_mode: return True - waiter = get_waiter(self.client, 'load_balancer_created') + waiter = get_waiter(self.client, "load_balancer_created") try: waiter.wait( @@ -1119,19 +1115,16 @@ class ElbManager(object): LoadBalancerNames=[self.name], ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB removal") return True def _wait_for_elb_interface_created(self): if self.check_mode: return True - waiter = get_waiter(self.ec2_client, 'network_interface_available') + waiter = get_waiter(self.ec2_client, "network_interface_available") - filters = ansible_dict_to_boto3_filter_list( - {'requester-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1139,7 +1132,7 @@ class ElbManager(object): Filters=filters, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB Interface removal") return True @@ -1147,7 +1140,7 @@ class ElbManager(object): if self.check_mode: return True - waiter = get_waiter(self.client, 'load_balancer_deleted') + waiter = get_waiter(self.client, "load_balancer_deleted") try: waiter.wait( @@ -1155,7 +1148,7 @@ class ElbManager(object): LoadBalancerNames=[self.name], ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB removal") return True @@ -1163,12 +1156,9 @@ class ElbManager(object): if self.check_mode: return True - waiter = get_waiter(self.ec2_client, 'network_interface_deleted') + waiter = get_waiter(self.ec2_client, "network_interface_deleted") - filters = ansible_dict_to_boto3_filter_list( - {'requester-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1176,7 +1166,7 @@ class ElbManager(object): Filters=filters, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB Interface removal") return True @@ -1198,7 +1188,7 @@ class ElbManager(object): Instances=instance_list, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Instance State') + self.module.fail_json_aws(e, "Timeout waiting for ELB Instance State") return True @@ -1244,17 +1234,17 @@ class ElbManager(object): # We can't use sets here: dicts aren't hashable, so convert to the boto3 # format and use a generator to filter new_listeners = list(self._format_listener(l, True) for l in 
self.listeners) - existing_listeners = list(l['Listener'] for l in self.elb['ListenerDescriptions']) + existing_listeners = list(l["Listener"] for l in self.elb["ListenerDescriptions"]) listeners_to_remove = list(l for l in existing_listeners if l not in new_listeners) listeners_to_add = list(l for l in new_listeners if l not in existing_listeners) changed = False if self.purge_listeners: - ports_to_remove = list(l['LoadBalancerPort'] for l in listeners_to_remove) + ports_to_remove = list(l["LoadBalancerPort"] for l in listeners_to_remove) else: - old_ports = set(l['LoadBalancerPort'] for l in listeners_to_remove) - new_ports = set(l['LoadBalancerPort'] for l in listeners_to_add) + old_ports = set(l["LoadBalancerPort"] for l in listeners_to_remove) + new_ports = set(l["LoadBalancerPort"] for l in listeners_to_add) # If we're not purging, then we need to remove Listeners # where the full definition doesn't match, but the port does ports_to_remove = list(old_ports & new_ports) @@ -1274,13 +1264,13 @@ class ElbManager(object): def _api_listener_as_tuple(self, listener): """Adds ssl_certificate_id to ELB API tuple if present""" base_tuple = [ - listener.get('LoadBalancerPort'), - listener.get('InstancePort'), - listener.get('Protocol'), - listener.get('InstanceProtocol'), + listener.get("LoadBalancerPort"), + listener.get("InstancePort"), + listener.get("Protocol"), + listener.get("InstanceProtocol"), ] - if listener.get('SSLCertificateId', False): - base_tuple.append(listener.get('SSLCertificateId')) + if listener.get("SSLCertificateId", False): + base_tuple.append(listener.get("SSLCertificateId")) return tuple(base_tuple) def _attach_subnets(self, subnets): @@ -1289,10 +1279,7 @@ class ElbManager(object): self.changed = True if self.check_mode: return True - self.client.attach_load_balancer_to_subnets( - aws_retry=True, - LoadBalancerName=self.name, - Subnets=subnets) + self.client.attach_load_balancer_to_subnets(aws_retry=True, LoadBalancerName=self.name, Subnets=subnets) return True def _detach_subnets(self, subnets): @@ -1301,10 +1288,7 @@ class ElbManager(object): self.changed = True if self.check_mode: return True - self.client.detach_load_balancer_from_subnets( - aws_retry=True, - LoadBalancerName=self.name, - Subnets=subnets) + self.client.detach_load_balancer_from_subnets(aws_retry=True, LoadBalancerName=self.name, Subnets=subnets) return True def _set_subnets(self): @@ -1316,10 +1300,10 @@ class ElbManager(object): changed = False if self.purge_subnets: - subnets_to_detach = list(set(self.elb['Subnets']) - set(self.subnets)) + subnets_to_detach = list(set(self.elb["Subnets"]) - set(self.subnets)) else: subnets_to_detach = list() - subnets_to_attach = list(set(self.subnets) - set(self.elb['Subnets'])) + subnets_to_attach = list(set(self.subnets) - set(self.elb["Subnets"])) # You can't add multiple subnets from the same AZ. Remove first, then # add. 
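Both _set_subnets above and _set_zones below reduce to the same purge-aware set arithmetic; they differ only in ordering (subnets are detached before attaching because two subnets cannot share an AZ, while zones are enabled before disabling to reduce the chance of an outage). A plain-Python sketch of that computation, with a hypothetical helper name and example IDs:

    # Standalone sketch of the purge/update set arithmetic; no AWS calls.
    def plan_update(current, desired, purge):
        """Return (to_add, to_remove) for a purge-aware membership update."""
        to_add = sorted(set(desired) - set(current))
        to_remove = sorted(set(current) - set(desired)) if purge else []
        return to_add, to_remove

    # plan_update(["subnet-a", "subnet-b"], ["subnet-b", "subnet-c"], purge=True)
    # returns (["subnet-c"], ["subnet-a"]); with purge=False nothing is removed.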
@@ -1337,7 +1321,7 @@ class ElbManager(object): def _check_scheme(self): """Determine if the current scheme is different than the scheme of the ELB""" if self.scheme: - if self.elb['Scheme'] != self.scheme: + if self.elb["Scheme"] != self.scheme: return True return False @@ -1355,7 +1339,7 @@ class ElbManager(object): AvailabilityZones=zones, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Failed to enable zones for load balancer') + self.module.fail_json_aws(e, msg="Failed to enable zones for load balancer") return True def _disable_zones(self, zones): @@ -1372,7 +1356,7 @@ class ElbManager(object): AvailabilityZones=zones, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Failed to disable zones for load balancer') + self.module.fail_json_aws(e, msg="Failed to disable zones for load balancer") return True def _set_zones(self): @@ -1384,10 +1368,10 @@ class ElbManager(object): changed = False if self.purge_zones: - zones_to_disable = list(set(self.elb['AvailabilityZones']) - set(self.zones)) + zones_to_disable = list(set(self.elb["AvailabilityZones"]) - set(self.zones)) else: zones_to_disable = list() - zones_to_enable = list(set(self.zones) - set(self.elb['AvailabilityZones'])) + zones_to_enable = list(set(self.zones) - set(self.elb["AvailabilityZones"])) # Add before we remove to reduce the chance of an outage if someone # replaces all zones at once @@ -1406,7 +1390,7 @@ class ElbManager(object): if not self.security_group_ids: return False # Security Group Names should already by converted to IDs by this point. - if set(self.elb['SecurityGroups']) == set(self.security_group_ids): + if set(self.elb["SecurityGroups"]) == set(self.security_group_ids): return False self.changed = True @@ -1431,7 +1415,7 @@ class ElbManager(object): """Set health check values on ELB as needed""" health_check_config = self._format_healthcheck() - if self.elb and health_check_config == self.elb['HealthCheck']: + if self.elb and health_check_config == self.elb["HealthCheck"]: return False self.changed = True @@ -1452,39 +1436,39 @@ class ElbManager(object): attributes = {} if self.cross_az_load_balancing is not None: attr = dict(Enabled=self.cross_az_load_balancing) - if not self.elb_attributes.get('CrossZoneLoadBalancing', None) == attr: - attributes['CrossZoneLoadBalancing'] = attr + if not self.elb_attributes.get("CrossZoneLoadBalancing", None) == attr: + attributes["CrossZoneLoadBalancing"] = attr if self.idle_timeout is not None: attr = dict(IdleTimeout=self.idle_timeout) - if not self.elb_attributes.get('ConnectionSettings', None) == attr: - attributes['ConnectionSettings'] = attr + if not self.elb_attributes.get("ConnectionSettings", None) == attr: + attributes["ConnectionSettings"] = attr if self.connection_draining_timeout is not None: - curr_attr = dict(self.elb_attributes.get('ConnectionDraining', {})) + curr_attr = dict(self.elb_attributes.get("ConnectionDraining", {})) if self.connection_draining_timeout == 0: attr = dict(Enabled=False) - curr_attr.pop('Timeout', None) + curr_attr.pop("Timeout", None) else: attr = dict(Enabled=True, Timeout=self.connection_draining_timeout) if not curr_attr == attr: - attributes['ConnectionDraining'] = attr + attributes["ConnectionDraining"] = attr if self.access_logs is not None: - curr_attr = dict(self.elb_attributes.get('AccessLog', {})) + curr_attr = dict(self.elb_attributes.get("AccessLog", {})) # For disabling we only 
need to compare and pass 'Enabled'
-            if not self.access_logs.get('enabled'):
-                curr_attr = dict(Enabled=curr_attr.get('Enabled', False))
-                attr = dict(Enabled=self.access_logs.get('enabled'))
+            if not self.access_logs.get("enabled"):
+                curr_attr = dict(Enabled=curr_attr.get("Enabled", False))
+                attr = dict(Enabled=self.access_logs.get("enabled"))
             else:
                 attr = dict(
                     Enabled=True,
-                    S3BucketName=self.access_logs['s3_location'],
-                    S3BucketPrefix=self.access_logs.get('s3_prefix', ''),
-                    EmitInterval=self.access_logs.get('interval', 60),
+                    S3BucketName=self.access_logs["s3_location"],
+                    S3BucketPrefix=self.access_logs.get("s3_prefix", ""),
+                    EmitInterval=self.access_logs.get("interval", 60),
                 )
             if not curr_attr == attr:
-                attributes['AccessLog'] = attr
+                attributes["AccessLog"] = attr

         if not attributes:
             return False
@@ -1495,25 +1479,23 @@ class ElbManager(object):

         try:
             self.client.modify_load_balancer_attributes(
-                aws_retry=True,
-                LoadBalancerName=self.name,
-                LoadBalancerAttributes=attributes
+                aws_retry=True, LoadBalancerName=self.name, LoadBalancerAttributes=attributes
             )
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             self.module.fail_json_aws(e, msg="Failed to apply load balancer attributes")

     def _proxy_policy_name(self):
-        return 'ProxyProtocol-policy'
+        return "ProxyProtocol-policy"

     def _policy_name(self, policy_type):
-        return 'ec2-elb-lb-{0}'.format(policy_type)
+        return f"ec2-elb-lb-{policy_type}"

     def _get_listener_policies(self):
         """Get a list of listener policies mapped to the LoadBalancerPort"""
         if not self.elb:
             return {}
-        listener_descriptions = self.elb.get('ListenerDescriptions', [])
-        policies = {l['LoadBalancerPort']: l['PolicyNames'] for l in listener_descriptions}
+        listener_descriptions = self.elb.get("ListenerDescriptions", [])
+        policies = {l["LoadBalancerPort"]: l["PolicyNames"] for l in listener_descriptions}
         return policies

     def _set_listener_policies(self, port, policies):
@@ -1529,35 +1511,40 @@ class ElbManager(object):
                 PolicyNames=list(policies),
             )
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-            self.module.fail_json_aws(e, msg="Failed to set load balancer listener policies",
-                                      port=port, policies=policies)
+            self.module.fail_json_aws(
+                e, msg="Failed to set load balancer listener policies", port=port, policies=policies
+            )

         return True

     def _get_stickiness_policies(self):
         """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies"""
-        return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] in ['AppCookieStickinessPolicyType', 'LBCookieStickinessPolicyType'])
+        return list(
+            p["PolicyName"]
+            for p in self.elb_policies
+            if p["PolicyTypeName"] in ["AppCookieStickinessPolicyType", "LBCookieStickinessPolicyType"]
+        )

     def _get_app_stickness_policy_map(self):
         """Get a mapping of App Cookie Stickiness policy names to their definitions"""
-        policies = self.elb.get('Policies', {}).get('AppCookieStickinessPolicies', [])
-        return {p['PolicyName']: p for p in policies}
+        policies = self.elb.get("Policies", {}).get("AppCookieStickinessPolicies", [])
+        return {p["PolicyName"]: p for p in policies}

     def _get_lb_stickness_policy_map(self):
         """Get a mapping of LB Cookie Stickiness policy names to their definitions"""
-        policies = self.elb.get('Policies', {}).get('LBCookieStickinessPolicies', [])
-        return {p['PolicyName']: p for p in
policies} def _purge_stickiness_policies(self): """Removes all stickiness policies from all Load Balancers""" # Used when purging stickiness policies or updating a policy (you can't # update a policy while it's connected to a Listener) stickiness_policies = set(self._get_stickiness_policies()) - listeners = self.elb['ListenerDescriptions'] + listeners = self.elb["ListenerDescriptions"] changed = False for listener in listeners: - port = listener['Listener']['LoadBalancerPort'] - policies = set(listener['PolicyNames']) + port = listener["Listener"]["LoadBalancerPort"] + policies = set(listener["PolicyNames"]) new_policies = set(policies - stickiness_policies) if policies != new_policies: changed |= self._set_listener_policies(port, new_policies) @@ -1572,12 +1559,12 @@ class ElbManager(object): # going to make changes to all listeners self._update_descriptions() - if not self.stickiness['enabled']: + if not self.stickiness["enabled"]: return self._purge_stickiness_policies() - if self.stickiness['type'] == 'loadbalancer': - policy_name = self._policy_name('LBCookieStickinessPolicyType') - expiration = self.stickiness.get('expiration') + if self.stickiness["type"] == "loadbalancer": + policy_name = self._policy_name("LBCookieStickinessPolicyType") + expiration = self.stickiness.get("expiration") if not expiration: expiration = 0 policy_description = dict( @@ -1586,21 +1573,14 @@ class ElbManager(object): ) existing_policies = self._get_lb_stickness_policy_map() add_method = self.client.create_lb_cookie_stickiness_policy - elif self.stickiness['type'] == 'application': - policy_name = self._policy_name('AppCookieStickinessPolicyType') - policy_description = dict( - PolicyName=policy_name, - CookieName=self.stickiness.get('cookie', 0) - ) + elif self.stickiness["type"] == "application": + policy_name = self._policy_name("AppCookieStickinessPolicyType") + policy_description = dict(PolicyName=policy_name, CookieName=self.stickiness.get("cookie", 0)) existing_policies = self._get_app_stickness_policy_map() add_method = self.client.create_app_cookie_stickiness_policy else: # We shouldn't get here... 
-            self.module.fail_json(
-                msg='Unknown stickiness policy {0}'.format(
-                    self.stickiness['type']
-                )
-            )
+            self.module.fail_json(msg=f"Unknown stickiness policy {self.stickiness['type']}")

         changed = False
         # To update a policy we need to delete then re-add, and we can only
@@ -1618,12 +1598,9 @@ class ElbManager(object):
             existing_policies=existing_policies,
         )

-        listeners = self.elb['ListenerDescriptions']
+        listeners = self.elb["ListenerDescriptions"]
         for listener in listeners:
-            changed |= self._set_lb_stickiness_policy(
-                listener=listener,
-                policy=policy_name
-            )
+            changed |= self._set_lb_stickiness_policy(listener=listener, policy=policy_name)
         return changed

     def _delete_loadbalancer_policy(self, policy_name):
@@ -1636,17 +1613,20 @@ class ElbManager(object):
                 LoadBalancerName=self.name,
                 PolicyName=policy_name,
             )
-        except is_boto3_error_code('InvalidConfigurationRequest'):
+        except is_boto3_error_code("InvalidConfigurationRequest"):
             # Already deleted
             return False
-        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-            self.module.fail_json_aws(e, msg="Failed to load balancer policy {0}".format(policy_name))
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:  # pylint: disable=duplicate-except
+            self.module.fail_json_aws(e, msg=f"Failed to delete load balancer policy {policy_name}")
         return True

     def _set_stickiness_policy(self, method, description, existing_policies=None):
         changed = False
         if existing_policies:
-            policy_name = description['PolicyName']
+            policy_name = description["PolicyName"]
             if policy_name in existing_policies:
                 if existing_policies[policy_name] == description:
                     return False
@@ -1661,26 +1641,23 @@ class ElbManager(object):

         # This needs to be in place for comparisons, but not passed to the
         # method.
- if not description.get('CookieExpirationPeriod', None): - description.pop('CookieExpirationPeriod', None) + if not description.get("CookieExpirationPeriod", None): + description.pop("CookieExpirationPeriod", None) try: - method( - aws_retry=True, - LoadBalancerName=self.name, - **description - ) + method(aws_retry=True, LoadBalancerName=self.name, **description) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to create load balancer stickiness policy", - description=description) + self.module.fail_json_aws( + e, msg="Failed to create load balancer stickiness policy", description=description + ) return changed def _set_lb_stickiness_policy(self, listener, policy): - port = listener['Listener']['LoadBalancerPort'] + port = listener["Listener"]["LoadBalancerPort"] stickiness_policies = set(self._get_stickiness_policies()) changed = False - policies = set(listener['PolicyNames']) + policies = set(listener["PolicyNames"]) new_policies = list(policies - stickiness_policies) new_policies.append(policy) @@ -1693,8 +1670,8 @@ class ElbManager(object): """Get a list of backend policies mapped to the InstancePort""" if not self.elb: return {} - server_descriptions = self.elb.get('BackendServerDescriptions', []) - policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions} + server_descriptions = self.elb.get("BackendServerDescriptions", []) + policies = {b["InstancePort"]: b["PolicyNames"] for b in server_descriptions} return policies def _get_proxy_protocol_policy(self): @@ -1708,11 +1685,11 @@ class ElbManager(object): def _get_proxy_policies(self): """Get a list of ProxyProtocolPolicyType policies""" - return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] == 'ProxyProtocolPolicyType') + return list(p["PolicyName"] for p in self.elb_policies if p["PolicyTypeName"] == "ProxyProtocolPolicyType") def _get_policy_map(self): """Get a mapping of Policy names to their definitions""" - return {p['PolicyName']: p for p in self.elb_policies} + return {p["PolicyName"]: p for p in self.elb_policies} def _set_backend_policies(self): """Sets policies for all backends""" @@ -1725,16 +1702,16 @@ class ElbManager(object): proxy_ports = dict() for listener in self.listeners: - proxy_protocol = listener.get('proxy_protocol', None) + proxy_protocol = listener.get("proxy_protocol", None) # Only look at the listeners for which proxy_protocol is defined if proxy_protocol is None: next - instance_port = listener.get('instance_port') + instance_port = listener.get("instance_port") if proxy_ports.get(instance_port, None) is not None: if proxy_ports[instance_port] != proxy_protocol: self.module.fail_json_aws( - 'proxy_protocol set to conflicting values for listeners' - ' on port {0}'.format(instance_port)) + f"proxy_protocol set to conflicting values for listeners on port {instance_port}" + ) proxy_ports[instance_port] = proxy_protocol if not proxy_ports: @@ -1778,8 +1755,9 @@ class ElbManager(object): PolicyNames=policies, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to set load balancer backend policies", - port=port, policies=policies) + self.module.fail_json_aws( + e, msg="Failed to set load balancer backend policies", port=port, policies=policies + ) return True @@ -1787,11 +1765,11 @@ class ElbManager(object): """Install a proxy protocol policy if needed""" policy_map = self._get_policy_map() - policy_attributes = 
[dict(AttributeName='ProxyProtocol', AttributeValue='true')] + policy_attributes = [dict(AttributeName="ProxyProtocol", AttributeValue="true")] proxy_policy = dict( PolicyName=policy_name, - PolicyTypeName='ProxyProtocolPolicyType', + PolicyTypeName="ProxyProtocolPolicyType", PolicyAttributeDescriptions=policy_attributes, ) @@ -1801,23 +1779,20 @@ class ElbManager(object): if existing_policy is not None: self.module.fail_json( - msg="Unable to configure ProxyProtocol policy. " - "Policy with name {0} already exists and doesn't match.".format(policy_name), - policy=proxy_policy, existing_policy=existing_policy, + msg=f"Unable to configure ProxyProtocol policy. Policy with name {policy_name} already exists and doesn't match.", + policy=proxy_policy, + existing_policy=existing_policy, ) - proxy_policy['PolicyAttributes'] = proxy_policy.pop('PolicyAttributeDescriptions') - proxy_policy['LoadBalancerName'] = self.name + proxy_policy["PolicyAttributes"] = proxy_policy.pop("PolicyAttributeDescriptions") + proxy_policy["LoadBalancerName"] = self.name self.changed = True if self.check_mode: return True try: - self.client.create_load_balancer_policy( - aws_retry=True, - **proxy_policy - ) + self.client.create_load_balancer_policy(aws_retry=True, **proxy_policy) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to create load balancer policy", policy=proxy_policy) @@ -1826,7 +1801,7 @@ class ElbManager(object): def _get_instance_ids(self): """Get the current list of instance ids installed in the elb""" elb = self.elb or {} - return list(i['InstanceId'] for i in elb.get('Instances', [])) + return list(i["InstanceId"] for i in elb.get("Instances", [])) def _change_instances(self, method, instances): if not instances: @@ -1836,7 +1811,7 @@ class ElbManager(object): if self.check_mode: return True - instance_id_list = list({'InstanceId': i} for i in instances) + instance_id_list = list({"InstanceId": i} for i in instances) try: method( aws_retry=True, @@ -1844,8 +1819,9 @@ class ElbManager(object): Instances=instance_id_list, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to change instance registration", - instances=instance_id_list, name=self.name) + self.module.fail_json_aws( + e, msg="Failed to change instance registration", instances=instance_id_list, name=self.name + ) return True def _set_instance_ids(self): @@ -1861,24 +1837,21 @@ class ElbManager(object): changed = False - changed |= self._change_instances(self.client.register_instances_with_load_balancer, - instances_to_add) + changed |= self._change_instances(self.client.register_instances_with_load_balancer, instances_to_add) if self.wait: - self._wait_for_instance_state('instance_in_service', list(instances_to_add)) - changed |= self._change_instances(self.client.deregister_instances_from_load_balancer, - instances_to_remove) + self._wait_for_instance_state("instance_in_service", list(instances_to_add)) + changed |= self._change_instances(self.client.deregister_instances_from_load_balancer, instances_to_remove) if self.wait: - self._wait_for_instance_state('instance_deregistered', list(instances_to_remove)) + self._wait_for_instance_state("instance_deregistered", list(instances_to_remove)) return changed def _get_tags(self): - tags = self.client.describe_tags(aws_retry=True, - LoadBalancerNames=[self.name]) + tags = self.client.describe_tags(aws_retry=True, LoadBalancerNames=[self.name]) if 
not tags: return {} try: - tags = tags['TagDescriptions'][0]['Tags'] + tags = tags["TagDescriptions"][0]["Tags"] except (KeyError, TypeError): return {} return boto3_tag_list_to_ansible_dict(tags) @@ -1913,8 +1886,7 @@ class ElbManager(object): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") - tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags, - self.purge_tags) + tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags, self.purge_tags) changed = False try: @@ -1932,34 +1904,35 @@ class ElbManager(object): problem_found = False if not stickiness: return problem_found - if not stickiness['enabled']: + if not stickiness["enabled"]: return problem_found - if stickiness['type'] == 'application': - if not stickiness.get('cookie'): + if stickiness["type"] == "application": + if not stickiness.get("cookie"): problem_found = True self.module.fail_json( msg='cookie must be specified when stickiness type is "application"', stickiness=stickiness, ) - if stickiness.get('expiration'): + if stickiness.get("expiration"): self.warn( - msg='expiration is ignored when stickiness type is "application"',) - if stickiness['type'] == 'loadbalancer': - if stickiness.get('cookie'): + msg='expiration is ignored when stickiness type is "application"', + ) + if stickiness["type"] == "loadbalancer": + if stickiness.get("cookie"): self.warn( - msg='cookie is ignored when stickiness type is "loadbalancer"',) + msg='cookie is ignored when stickiness type is "loadbalancer"', + ) return problem_found def _validate_access_logs(self, access_logs): problem_found = False if not access_logs: return problem_found - if not access_logs['enabled']: + if not access_logs["enabled"]: return problem_found - if not access_logs.get('s3_location', None): + if not access_logs.get("s3_location", None): problem_found = True - self.module.fail_json( - msg='s3_location must be provided when access_logs.state is "present"') + self.module.fail_json(msg='s3_location must be provided when access_logs.state is "present"') return problem_found def _validate_creation_requirements(self): @@ -1968,12 +1941,10 @@ class ElbManager(object): problem_found = False if not self.subnets and not self.zones: problem_found = True - self.module.fail_json( - msg='One of subnets or zones must be provided when creating an ELB') + self.module.fail_json(msg="One of subnets or zones must be provided when creating an ELB") if not self.listeners: problem_found = True - self.module.fail_json( - msg='listeners must be provided when creating an ELB') + self.module.fail_json(msg="listeners must be provided when creating an ELB") return problem_found def _validate_listeners(self, listeners): @@ -1985,59 +1956,60 @@ class ElbManager(object): problem_found = False if not listener: return problem_found - for protocol in ['instance_protocol', 'protocol']: + for protocol in ["instance_protocol", "protocol"]: value = listener.get(protocol, None) problem = self._validate_protocol(value) problem_found |= problem if problem: - self.module.fail_json( - msg='Invalid protocol ({0}) in listener'.format(value), - listener=listener) + self.module.fail_json(msg=f"Invalid protocol ({value}) in listener", listener=listener) return problem_found def _validate_health_check(self, health_check): if not health_check: return False - protocol = health_check['ping_protocol'] + protocol = health_check["ping_protocol"] if self._validate_protocol(protocol): 
self.module.fail_json( - msg='Invalid protocol ({0}) defined in health check'.format(protocol), - health_check=health_check,) - if protocol.upper() in ['HTTP', 'HTTPS']: - if not health_check['ping_path']: + msg=f"Invalid protocol ({protocol}) defined in health check", + health_check=health_check, + ) + if protocol.upper() in ["HTTP", "HTTPS"]: + if not health_check["ping_path"]: self.module.fail_json( - msg='For HTTP and HTTPS health checks a ping_path must be provided', - health_check=health_check,) + msg="For HTTP and HTTPS health checks a ping_path must be provided", + health_check=health_check, + ) return False def _validate_protocol(self, protocol): if not protocol: return False - return protocol.upper() not in ['HTTP', 'HTTPS', 'TCP', 'SSL'] + return protocol.upper() not in ["HTTP", "HTTPS", "TCP", "SSL"] @AWSRetry.jittered_backoff() def _describe_loadbalancer(self, lb_name): - paginator = self.client.get_paginator('describe_load_balancers') - return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()['LoadBalancerDescriptions'] + paginator = self.client.get_paginator("describe_load_balancers") + return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()["LoadBalancerDescriptions"] def _get_vpc_from_subnets(self, subnets): if not subnets: return None subnet_details = self._describe_subnets(list(subnets)) - vpc_ids = set(subnet['VpcId'] for subnet in subnet_details) + vpc_ids = set(subnet["VpcId"] for subnet in subnet_details) if not vpc_ids: return None if len(vpc_ids) > 1: - self.module.fail_json("Subnets for an ELB may not span multiple VPCs", - subnets=subnet_details, vpc_ids=vpc_ids) + self.module.fail_json( + "Subnets for an ELB may not span multiple VPCs", subnets=subnet_details, vpc_ids=vpc_ids + ) return vpc_ids.pop() @AWSRetry.jittered_backoff() def _describe_subnets(self, subnet_ids): - paginator = self.ec2_client.get_paginator('describe_subnets') - return paginator.paginate(SubnetIds=subnet_ids).build_full_result()['Subnets'] + paginator = self.ec2_client.get_paginator("describe_subnets") + return paginator.paginate(SubnetIds=subnet_ids).build_full_result()["Subnets"] # Wrap it so we get the backoff @AWSRetry.jittered_backoff() @@ -2046,92 +2018,91 @@ class ElbManager(object): def main(): - access_log_spec = dict( - enabled=dict(required=False, type='bool', default=True), - s3_location=dict(required=False, type='str'), - s3_prefix=dict(required=False, type='str', default=""), - interval=dict(required=False, type='int', default=60, choices=[5, 60]), + enabled=dict(required=False, type="bool", default=True), + s3_location=dict(required=False, type="str"), + s3_prefix=dict(required=False, type="str", default=""), + interval=dict(required=False, type="int", default=60, choices=[5, 60]), ) stickiness_spec = dict( - type=dict(required=False, type='str', choices=['application', 'loadbalancer']), - enabled=dict(required=False, type='bool', default=True), - cookie=dict(required=False, type='str'), - expiration=dict(required=False, type='int') + type=dict(required=False, type="str", choices=["application", "loadbalancer"]), + enabled=dict(required=False, type="bool", default=True), + cookie=dict(required=False, type="str"), + expiration=dict(required=False, type="int"), ) healthcheck_spec = dict( - ping_protocol=dict(required=True, type='str'), - ping_path=dict(required=False, type='str'), - ping_port=dict(required=True, type='int'), - interval=dict(required=True, type='int'), - timeout=dict(aliases=['response_timeout'], required=True, type='int'), - 
unhealthy_threshold=dict(required=True, type='int'), - healthy_threshold=dict(required=True, type='int'), + ping_protocol=dict(required=True, type="str"), + ping_path=dict(required=False, type="str"), + ping_port=dict(required=True, type="int"), + interval=dict(required=True, type="int"), + timeout=dict(aliases=["response_timeout"], required=True, type="int"), + unhealthy_threshold=dict(required=True, type="int"), + healthy_threshold=dict(required=True, type="int"), ) listeners_spec = dict( - load_balancer_port=dict(required=True, type='int'), - instance_port=dict(required=True, type='int'), - ssl_certificate_id=dict(required=False, type='str'), - protocol=dict(required=True, type='str'), - instance_protocol=dict(required=False, type='str'), - proxy_protocol=dict(required=False, type='bool'), + load_balancer_port=dict(required=True, type="int"), + instance_port=dict(required=True, type="int"), + ssl_certificate_id=dict(required=False, type="str"), + protocol=dict(required=True, type="str"), + instance_protocol=dict(required=False, type="str"), + proxy_protocol=dict(required=False, type="bool"), ) argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), - listeners=dict(type='list', elements='dict', options=listeners_spec), - purge_listeners=dict(default=True, type='bool'), - instance_ids=dict(type='list', elements='str'), - purge_instance_ids=dict(default=False, type='bool'), - zones=dict(type='list', elements='str'), - purge_zones=dict(default=False, type='bool'), - security_group_ids=dict(type='list', elements='str'), - security_group_names=dict(type='list', elements='str'), - health_check=dict(type='dict', options=healthcheck_spec), - subnets=dict(type='list', elements='str'), - purge_subnets=dict(default=False, type='bool'), - scheme=dict(choices=['internal', 'internet-facing']), - connection_draining_timeout=dict(type='int'), - idle_timeout=dict(type='int'), - cross_az_load_balancing=dict(type='bool'), - stickiness=dict(type='dict', options=stickiness_spec), - access_logs=dict(type='dict', options=access_log_spec), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=180, type='int'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + listeners=dict(type="list", elements="dict", options=listeners_spec), + purge_listeners=dict(default=True, type="bool"), + instance_ids=dict(type="list", elements="str"), + purge_instance_ids=dict(default=False, type="bool"), + zones=dict(type="list", elements="str"), + purge_zones=dict(default=False, type="bool"), + security_group_ids=dict(type="list", elements="str"), + security_group_names=dict(type="list", elements="str"), + health_check=dict(type="dict", options=healthcheck_spec), + subnets=dict(type="list", elements="str"), + purge_subnets=dict(default=False, type="bool"), + scheme=dict(choices=["internal", "internet-facing"]), + connection_draining_timeout=dict(type="int"), + idle_timeout=dict(type="int"), + cross_az_load_balancing=dict(type="bool"), + stickiness=dict(type="dict", options=stickiness_spec), + access_logs=dict(type="dict", options=access_log_spec), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=180, type="int"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['security_group_ids', 'security_group_names'], - 
['zones', 'subnets'], + ["security_group_ids", "security_group_names"], + ["zones", "subnets"], ], supports_check_mode=True, ) - wait_timeout = module.params['wait_timeout'] - state = module.params['state'] + wait_timeout = module.params["wait_timeout"] + state = module.params["state"] if wait_timeout > 600: - module.fail_json(msg='wait_timeout maximum is 600 seconds') + module.fail_json(msg="wait_timeout maximum is 600 seconds") elb_man = ElbManager(module) elb_man.validate_params(state) - if state == 'present': + if state == "present": elb_man.ensure_ok() # original boto style elb = elb_man.get_info() # boto3 style lb = elb_man.get_load_balancer() ec2_result = dict(elb=elb, load_balancer=lb) - elif state == 'absent': + elif state == "absent": elb_man.ensure_gone() # original boto style elb = elb_man.get_info() @@ -2143,5 +2114,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py b/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py new file mode 100644 index 000000000..c2e306025 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_access_key +version_added: 2.1.0 +version_added_collection: community.aws +short_description: Manage AWS IAM User access keys +description: + - Manage AWS IAM user access keys. +author: + - Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the key belongs. + required: true + type: str + aliases: ['username'] + id: + description: + - The ID of the access key. + - Required when I(state=absent). + - Mutually exclusive with I(rotate_keys). + required: false + type: str + state: + description: + - Create or remove the access key. + - When I(state=present) and I(id) is not defined a new key will be created. + required: false + type: str + default: 'present' + choices: [ 'present', 'absent' ] + active: + description: + - Whether the key should be enabled or disabled. + - Defaults to C(true) when creating a new key. + required: false + type: bool + aliases: ['enabled'] + rotate_keys: + description: + - When there are already 2 access keys attached to the IAM user the oldest + key will be removed and a new key created. + - Ignored if I(state=absent) + - Mutually exclusive with I(id). + required: false + type: bool + default: false +notes: + - For security reasons, this module should be used with B(no_log=true) and (register) functionalities + when creating new access key. +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a new access key + amazon.aws.iam_access_key: + user_name: example_user + state: present + no_log: true + +- name: Delete the access_key + amazon.aws.iam_access_key: + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE + state: absent +""" + +RETURN = r""" +access_key: + description: A dictionary containing all the access key information. + returned: When the key exists. + type: complex + contains: + access_key_id: + description: The ID for the access key. 
+ returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +secret_access_key: + description: + - The secret access key. + - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data. + - Secret access keys can only be accessed at creation time. + returned: When a new key is created. + type: str + sample: example/Example+EXAMPLE+example/Example +deleted_access_key_id: + description: + - The access key deleted during rotation. + returned: When a key was deleted during the rotation of access keys + type: str + sample: AKIA1EXAMPLE1EXAMPLE +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_access_keys +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_access_key +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + + +@IAMErrorHandler.deletion_error_handler("Failed to delete access key for user") +def delete_access_key(access_keys, user, access_key_id): + if not access_key_id: + return False + if access_key_id not in [k["access_key_id"] for k in access_keys]: + return False + + if module.check_mode: + return True + + client.delete_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id) + return True + + +@IAMErrorHandler.common_error_handler("Failed to update access key for user") +def update_access_key_state(access_keys, user, access_key_id, enabled): + keys = {k["access_key_id"]: k for k in access_keys} + + if access_key_id not in keys: + raise AnsibleIAMError(message=f'Access key "{access_key_id}" not found attached to User "{user}"') + + if enabled is None: + return False + + access_key = keys.get(access_key_id) + + desired_status = "Active" if enabled else "Inactive" + if access_key.get("status") == desired_status: + return False + + if module.check_mode: + return True + + client.update_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id, Status=desired_status) + return True + + +@IAMErrorHandler.common_error_handler("Failed to create access key for user") +def create_access_key(access_keys, user, rotate_keys, enabled): + changed = False + oldest_key = False + + if len(access_keys) > 1 and rotate_keys: + oldest_key = access_keys[0].get("access_key_id") + changed |= delete_access_key(access_keys, user, oldest_key) + + if module.check_mode: + if oldest_key: + return dict(deleted_access_key=oldest_key) + return dict() + + results = client.create_access_key(aws_retry=True, UserName=user) + access_key = normalize_iam_access_key(results.get("AccessKey")) + + # Update settings which can't be managed on creation + if enabled is False: + access_key_id = 
access_key["access_key_id"] + update_access_key_state([access_key], user, access_key_id, enabled) + access_key["status"] = "Inactive" + + if oldest_key: + access_key["deleted_access_key"] = oldest_key + + return access_key + + +def update_access_key(access_keys, user, access_key_id, enabled): + changed = update_access_key_state(access_keys, user, access_key_id, enabled) + access_keys = get_iam_access_keys(client, user) + keys = {k["access_key_id"]: k for k in access_keys} + return changed, {"access_key": keys.get(access_key_id, None)} + + +def main(): + global module + global client + + argument_spec = dict( + user_name=dict(required=True, type="str", aliases=["username"]), + id=dict(required=False, type="str"), + state=dict(required=False, choices=["present", "absent"], default="present"), + active=dict(required=False, type="bool", aliases=["enabled"]), + rotate_keys=dict(required=False, type="bool", default=False), + ) + + required_if = [ + ["state", "absent", ("id",)], + ] + mutually_exclusive = [ + ["rotate_keys", "id"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + user = module.params.get("user_name") + access_key_id = module.params.get("id") + rotate_keys = module.params.get("rotate_keys") + enabled = module.params.get("active") + + access_keys = get_iam_access_keys(client, user) + results = dict() + + try: + if state == "absent": + changed = delete_access_key(access_keys, user, access_key_id) + module.exit_json(changed=changed) + + if access_key_id: + changed, results = update_access_key(access_keys, user, access_key_id, enabled) + else: + secret_key = create_access_key(access_keys, user, rotate_keys, enabled) + changed = True + results = { + "access_key_id": secret_key.get("access_key_id", None), + "secret_access_key": secret_key.pop("secret_access_key", None), + "deleted_access_key_id": secret_key.pop("deleted_access_key", None), + "access_key": secret_key or None, + } + results = scrub_none_parameters(results) + + module.exit_json(changed=changed, **results) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py new file mode 100644 index 000000000..ce23a93f5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_access_key_info +version_added: 2.1.0 +version_added_collection: community.aws +short_description: fetch information about AWS IAM User access keys +description: + - 'Fetches information AWS IAM user access keys.' + - 'Note: It is not possible to fetch the secret access key.' +author: + - Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the keys belong. + required: true + type: str + aliases: ['username'] + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +- name: Fetch Access keys for a user + amazon.aws.iam_access_key_info: + user_name: example_user +""" + +RETURN = r""" +access_keys: + description: A list of dictionaries containing all the access key information. + returned: When the keys exist. + type: list + elements: dict + contains: + access_key_id: + description: The ID for the access key. + returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_access_keys +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def main(): + argument_spec = dict( + user_name=dict(required=True, type="str", aliases=["username"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + try: + access_keys = get_iam_access_keys(client, module.params.get("user_name")) + module.exit_json(changed=False, access_keys=access_keys) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_group.py b/ansible_collections/amazon/aws/plugins/modules/iam_group.py new file mode 100644 index 000000000..2891a4d83 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_group.py @@ -0,0 +1,441 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_group +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Manage AWS IAM groups +description: + - Manage AWS IAM groups. +author: + - Nick Aslanidis (@naslanidis) + - Maksym Postument (@infectsoldier) +options: + name: + description: + - The name of the group. + - >- + Note: Group names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyGroup) and C(/Path2/MyGroup) in the same account. + - The alias C(group_name) was added in release 7.2.0. + required: true + aliases: ['group_name'] + type: str + path: + description: + - The group path. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + aliases: ['prefix', 'path_prefix'] + version_added: 7.1.0 + type: str + managed_policies: + description: + - A list of managed policy ARNs or friendly names to attach to the group. + - If known, it is recommended to use ARNs rather than friendly names to avoid additional + lookups. + - To embed an inline policy, use M(amazon.aws.iam_policy).
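The managed_policies documentation above recommends ARNs over friendly names because resolving a name requires an account-wide listing. A rough sketch of that lookup in plain boto3; the helper name policy_name_to_arn is illustrative only:

import boto3


def policy_name_to_arn(name):
    # Resolving a friendly name means paging through every managed
    # policy visible to the account, which is the "additional lookup"
    # the documentation warns about.
    iam = boto3.client("iam")
    paginator = iam.get_paginator("list_policies")
    for page in paginator.paginate(Scope="All"):
        for policy in page["Policies"]:
            if policy["PolicyName"] == name:
                return policy["Arn"]
    raise LookupError(f"No managed policy named {name!r}")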
+ required: false + type: list + elements: str + default: [] + aliases: ['managed_policy'] + users: + description: + - A list of existing users to add as members of the group. + required: false + type: list + elements: str + default: [] + state: + description: + - Create or remove the IAM group. + required: true + choices: [ 'present', 'absent' ] + type: str + purge_policies: + description: + - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. + required: false + default: false + type: bool + aliases: ['purge_policy', 'purge_managed_policies'] + purge_users: + description: + - When I(purge_users=true) users which are not included in I(users) will be detached. + required: false + default: false + type: bool +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a group + amazon.aws.iam_group: + name: testgroup1 + state: present + +- name: Create a group and attach a managed policy using its ARN + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + state: present + +- name: Create a group with users as members and attach a managed policy using its ARN + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + users: + - test_user1 + - test_user2 + state: present + +- name: Remove all managed policies from an existing group with an empty list + amazon.aws.iam_group: + name: testgroup1 + state: present + purge_policies: true + +- name: Remove all group members from an existing group + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + purge_users: true + state: present + +- name: Delete the group + amazon.aws.iam_group: + name: testgroup1 + state: absent +""" + +RETURN = r""" +iam_group: + description: dictionary containing all the group information including group membership + returned: success + type: complex + contains: + group: + description: dictionary containing all the group information + returned: success + type: complex + contains: + arn: + description: the Amazon Resource Name (ARN) specifying the group + type: str + sample: "arn:aws:iam::1234567890:group/testgroup1" + create_date: + description: the date and time, in ISO 8601 date-time format, when the group was created + type: str + sample: "2017-02-08T04:36:28+00:00" + group_id: + description: the stable and unique string identifying the group + type: str + sample: AGPA12345EXAMPLE54321 + group_name: + description: the friendly name that identifies the group + type: str + sample: testgroup1 + path: + description: the path to the group + type: str + sample: / + users: + description: list containing all the group members + returned: success + type: complex + contains: + arn: + description: the Amazon Resource Name (ARN) specifying the user + type: str + sample: "arn:aws:iam::1234567890:user/test_user1" + create_date: + description: the date and time, in ISO 8601 date-time format, when the user was created + type: str + sample: "2017-02-08T04:36:28+00:00" + user_id: + description: the stable and unique string identifying the user + type: str + sample: AIDA12345EXAMPLE54321 + user_name: + description: the friendly name that identifies the user + type: str + sample: test_user1 + path: + description: the path to the user + type:
str + sample: / + attached_policies: + version_added: 7.1.0 + description: + - list containing basic information about managed policies attached to the group. + returned: success + type: complex + contains: + policy_arn: + description: the Amazon Resource Name (ARN) specifying the managed policy. + type: str + sample: "arn:aws:iam::123456789012:policy/test_policy" + policy_name: + description: the friendly name that identifies the policy. + type: str + sample: test_policy +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_group +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_group +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@IAMErrorHandler.common_error_handler("update group path") +def ensure_path(connection, module, group_info, path): + if path is None: + return False + + if group_info["Group"].get("Path") == path: + return False + + if module.check_mode: + return True + + connection.update_group( + aws_retry=True, + GroupName=group_info["Group"]["GroupName"], + NewPath=path, + ) + return True + + +def detach_policies(connection, module, group_name, policies): + for policy_arn in policies: + IAMErrorHandler.deletion_error_handler(f"detach policy {policy_arn} from group")( + connection.detach_group_policy + )(aws_retry=True, GroupName=group_name, PolicyArn=policy_arn) + + +def attach_policies(connection, module, group_name, policies): + for policy_arn in policies: + IAMErrorHandler.common_error_handler(f"attach policy {policy_arn} to group")(connection.attach_group_policy)( + aws_retry=True, GroupName=group_name, PolicyArn=policy_arn + ) + + +def ensure_managed_policies(connection, module, group_info, managed_policies, purge_policies): + if managed_policies is None: + return False + + if managed_policies: + managed_policies = convert_managed_policy_names_to_arns(connection, managed_policies) + + group_name = group_info["Group"]["GroupName"] + + current_attached_policies_desc = get_attached_policy_list(connection, module, group_name) + current_attached_policies = [policy["PolicyArn"] for policy in current_attached_policies_desc] + + policies_to_add = list(set(managed_policies) - set(current_attached_policies)) + policies_to_remove = [] + if purge_policies: + policies_to_remove = list(set(current_attached_policies) - set(managed_policies)) + + if not policies_to_add and not policies_to_remove: + return False + + if module.check_mode: + return True + + detach_policies(connection, module, group_name, policies_to_remove) + attach_policies(connection, module, group_name, policies_to_add) + + return True + + +def add_group_members(connection, module, group_name, members): + for user in members: + IAMErrorHandler.common_error_handler(f"add user {user} to group")(connection.add_user_to_group)( + aws_retry=True, GroupName=group_name, UserName=user + ) + + +def remove_group_members(connection, module, group_name, members): + for user in members: + IAMErrorHandler.deletion_error_handler(f"remove user {user} from 
group")(connection.remove_user_from_group)( + aws_retry=True, GroupName=group_name, UserName=user + ) + + +def ensure_group_members(connection, module, group_info, users, purge_users): + if users is None: + return False + + group_name = group_info["Group"]["GroupName"] + current_group_members = [member["UserName"] for member in group_info["Users"]] + + members_to_add = list(set(users) - set(current_group_members)) + members_to_remove = [] + if purge_users: + members_to_remove = list(set(current_group_members) - set(users)) + + if not members_to_add and not members_to_remove: + return False + + if module.check_mode: + return True + + add_group_members(connection, module, group_name, members_to_add) + remove_group_members(connection, module, group_name, members_to_remove) + + return True + + +@IAMErrorHandler.common_error_handler("create group") +def get_or_create_group(connection, module, group_name, path): + group = get_iam_group(connection, group_name) + if group: + return False, group + + params = {"GroupName": group_name} + if path is not None: + params["Path"] = path + + # Check mode means we would create the group + if module.check_mode: + module.exit_json(changed=True, create_params=params) + + group = connection.create_group(aws_retry=True, **params) + + if "Users" not in group: + group["Users"] = [] + + return True, group + + +def create_or_update_group(connection, module): + changed, group_info = get_or_create_group(connection, module, module.params["name"], module.params["path"]) + + # Update the path if necessary + changed |= ensure_path( + connection, + module, + group_info, + module.params["path"], + ) + + # Manage managed policies + changed |= ensure_managed_policies( + connection, + module, + group_info, + module.params["managed_policies"], + module.params["purge_policies"], + ) + + # Manage group memberships + changed |= ensure_group_members( + connection, + module, + group_info, + module.params["users"], + module.params["purge_users"], + ) + + if module.check_mode: + module.exit_json(changed=changed) + + # Get the group again + group_info = get_iam_group(connection, module.params["name"]) + policies = get_attached_policy_list(connection, module, module.params["name"]) + group_info["AttachedPolicies"] = policies + + module.exit_json(changed=changed, iam_group=normalize_iam_group(group_info)) + + +@IAMErrorHandler.deletion_error_handler("delete group") +def destroy_group(connection, module): + group_name = module.params.get("name") + + group = get_iam_group(connection, group_name) + + if not group: + module.exit_json(changed=False) + + # Check mode means we would remove this group + if module.check_mode: + module.exit_json(changed=True) + + # Remove any attached policies otherwise deletion fails + current_policies_desc = get_attached_policy_list(connection, module, group_name) + current_policies = [policy["PolicyArn"] for policy in current_policies_desc] + detach_policies(connection, module, group_name, current_policies) + + # Remove any users in the group otherwise deletion fails + current_group_members = [user["UserName"] for user in group["Users"]] + remove_group_members(connection, module, group_name, current_group_members) + + connection.delete_group(aws_retry=True, GroupName=group_name) + + module.exit_json(changed=True) + + +@IAMErrorHandler.list_error_handler("list policies attached to group") +@AWSRetry.jittered_backoff() +def get_attached_policy_list(connection, module, name): + paginator = connection.get_paginator("list_attached_group_policies") + return 
paginator.paginate(GroupName=name).build_full_result()["AttachedPolicies"] + + +def main(): + argument_spec = dict( + name=dict(aliases=["group_name"], required=True), + path=dict(aliases=["prefix", "path_prefix"]), + managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"), + users=dict(default=[], type="list", elements="str"), + state=dict(choices=["present", "absent"], required=True), + purge_users=dict(default=False, type="bool"), + purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + identifier_problem = validate_iam_identifiers( + "group", name=module.params.get("name"), path=module.params.get("path") + ) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + connection = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + + try: + if state == "present": + create_or_update_group(connection, module) + else: + destroy_group(connection, module) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py new file mode 100644 index 000000000..52b7c9370 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile +version_added: 6.2.0 +short_description: manage IAM instance profiles +description: + - Manage IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + state: + description: + - Desired state of the instance profile. + type: str + choices: ["absent", "present"] + default: "present" + name: + description: + - Name of the instance profile. + - >- + Note: Profile names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyProfile) and C(/Path2/MyProfile) in the same account. + aliases: ["instance_profile_name"] + type: str + required: True + path: + description: + - The instance profile path. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + - Updating the path on an existing profile is not currently supported and will result in a + warning. + - The parameter was renamed from C(prefix) to C(path) in release 7.2.0. + aliases: ["path_prefix", "prefix"] + type: str + role: + description: + - The name of the role to attach to the instance profile. + - To remove all roles from the instance profile set I(role=""). 
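Since an instance profile carries at most one role here, replacing the role means detaching the current one and attaching the new one, as the module code further below does. A rough boto3 sketch of that swap, assuming the profile already exists; set_profile_role is an illustrative name, not the module's API:

import boto3


def set_profile_role(profile_name, role_name):
    iam = boto3.client("iam")
    profile = iam.get_instance_profile(InstanceProfileName=profile_name)["InstanceProfile"]
    # Detach whatever role is currently attached.
    for role in profile["Roles"]:
        iam.remove_role_from_instance_profile(
            InstanceProfileName=profile_name, RoleName=role["RoleName"]
        )
    # An empty role_name mirrors role="" above: detach only.
    if role_name:
        iam.add_role_to_instance_profile(
            InstanceProfileName=profile_name, RoleName=role_name
        )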
+ type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Create Instance Profile + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile" + role: "/OurExamples/MyExampleRole" + path: "/OurExamples/" + tags: + ExampleTag: Example Value + register: profile_result + +- name: Create second Instance Profile with default path + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile2" + role: "/OurExamples/MyExampleRole" + tags: + ExampleTag: Another Example Value + register: profile_result + +- name: Find all IAM instance profiles starting with /OurExamples/ + amazon.aws.iam_instance_profile_info: + path_prefix: /OurExamples/ + register: result + +- name: Delete second Instance Profile + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile2" + state: absent +""" + +RETURN = r""" +iam_instance_profile: + description: List of IAM instance profiles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + tags: + description: Instance profile tags. 
+ type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + +from copy import deepcopy + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def describe_iam_instance_profile(client, name, prefix): + profiles = [] + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + if not profiles: + return None + + return normalize_iam_instance_profile(profiles[0]) + + +def create_instance_profile(client, name, path, tags, check_mode): + if check_mode: + return True, {"instance_profile_name": name, "path": path, "tags": tags or {}, "roles": []} + + profile = create_iam_instance_profile(client, name, path, tags) + return True, normalize_iam_instance_profile(profile) + + +def ensure_tags( + original_profile, + client, + name, + tags, + purge_tags, + check_mode, +): + if tags is None: + return False, original_profile + + original_tags = original_profile.get("tags") or {} + + tags_to_set, tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False, original_profile + + new_profile = deepcopy(original_profile) + desired_tags = deepcopy(original_tags) + + for key in tag_keys_to_unset: + desired_tags.pop(key, None) + desired_tags.update(tags_to_set) + new_profile["tags"] = desired_tags + + if not check_mode: + untag_iam_instance_profile(client, name, tag_keys_to_unset) + tag_iam_instance_profile(client, name, tags_to_set) + + return True, new_profile + + +def ensure_role( + original_profile, + client, + name, + role, + check_mode, +): + if role is None: + return False, original_profile + + if role == "" and not original_profile.get("roles"): + return False, original_profile + else: + desired_role = [] + + if original_profile.get("roles") and original_profile.get("roles")[0].get("role_name", None) == role: + return False, original_profile + else: + desired_role = [{"role_name": role}] + + new_profile = deepcopy(original_profile) + new_profile["roles"] = desired_role + + if check_mode: + return True, new_profile + + if original_profile.get("roles"): + # We're changing the role, so we always need to remove the existing one first + remove_role_from_iam_instance_profile(client, name, original_profile["roles"][0]["role_name"]) + if role: + add_role_to_iam_instance_profile(client, name, role) + + return True, new_profile + + +def ensure_present( + 
original_profile, + client, + name, + path, + tags, + purge_tags, + role, + check_mode, +): + changed = False + if not original_profile: + changed, new_profile = create_instance_profile( + client, + name=name, + path=path, + tags=tags, + check_mode=check_mode, + ) + else: + new_profile = deepcopy(original_profile) + + role_changed, new_profile = ensure_role( + new_profile, + client, + name, + role, + check_mode, + ) + + tags_changed, new_profile = ensure_tags( + new_profile, + client, + name, + tags, + purge_tags, + check_mode, + ) + + changed |= role_changed + changed |= tags_changed + + return changed, new_profile + + +def ensure_absent( + original_profile, + client, + name, + prefix, + check_mode, +): + if not original_profile: + return False + + if check_mode: + return True + + roles = original_profile.get("roles") or [] + for role in roles: + remove_role_from_iam_instance_profile(client, name, role.get("role_name")) + + return delete_iam_instance_profile(client, name) + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"], required=True), + path=dict(aliases=["path_prefix", "prefix"]), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(aliases=["resource_tags"], type="dict"), + purge_tags=dict(type="bool", default=True), + role=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params.get("name") + state = module.params.get("state") + path = module.params.get("path") + + identifier_problem = validate_iam_identifiers("instance profile", name=name, path=path) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + try: + original_profile = describe_iam_instance_profile(client, name, path) + + if state == "absent": + changed = ensure_absent( + original_profile, + client, + name, + path, + module.check_mode, + ) + final_profile = None + else: + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + if original_profile and path and original_profile.get("path") != path: + module.warn( + "iam_instance_profile doesn't support updating the path: " + f"current path '{original_profile.get('path')}', requested path '{path}'" + ) + + changed, final_profile = ensure_present( + original_profile, + client, + name, + path, + module.params["tags"], + module.params["purge_tags"], + module.params["role"], + module.check_mode, + ) + + if not module.check_mode: + final_profile = describe_iam_instance_profile(client, name, path) + + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + results = { + "changed": changed, + "iam_instance_profile": final_profile, + } + if changed: + results["diff"] = { + "before": original_profile, + "after": final_profile, + } + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py new file mode 100644 index 000000000..a26a06990 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile_info +version_added: 6.2.0 
+short_description: gather information on IAM instance profiles +description: + - Gathers information about IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + name: + description: + - Name of an instance profile to search for. + - Mutually exclusive with I(prefix). + aliases: + - instance_profile_name + type: str + path_prefix: + description: + - The path prefix for filtering the results. + - Mutually exclusive with I(name). + aliases: ["path", "prefix"] + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + path_prefix: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profiles: + description: List of IAM instance profiles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile.
+ returned: always + type: list + sample: [] +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def describe_iam_instance_profiles(module, client): + name = module.params["name"] + prefix = module.params["path_prefix"] + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + return [normalize_iam_instance_profile(p) for p in profiles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + try: + module.exit_json(changed=False, iam_instance_profiles=describe_iam_instance_profiles(module, client)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py new file mode 100644 index 000000000..90796b055 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py @@ -0,0 +1,488 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_managed_policy +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Manage User Managed IAM policies +description: + - Allows creating and removing managed IAM policies. +options: + name: + description: + - The name of the managed policy. + - >- + Note: Policy names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyPolicy) and C(/Path2/MyPolicy) in the same account. + - The parameter was renamed from C(policy_name) to C(name) in release 7.2.0. + required: true + type: str + aliases: ["policy_name"] + path: + description: + - The path for the managed policy. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + aliases: ['prefix', 'path_prefix'] + required: false + type: str + version_added: 7.2.0 + description: + description: + - A helpful description of this policy. This value is immutable and only set when creating a new policy. + - The parameter was renamed from C(policy_description) to C(description) in release 7.2.0. + aliases: ["policy_description"] + type: str + policy: + description: + - A properly JSON-formatted policy. + type: json + make_default: + description: + - Make this revision the default revision. + default: true + type: bool + only_version: + description: + - Remove all other non-default revisions. If this is used with C(make_default) it will result in all other versions of this policy being deleted.
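For context on only_version: IAM keeps a small number of versions per managed policy (five by default), and only non-default versions may be deleted. A rough boto3 sketch of the pruning this option implies; prune_policy_versions is an illustrative name:

import boto3


def prune_policy_versions(policy_arn):
    iam = boto3.client("iam")
    versions = iam.list_policy_versions(PolicyArn=policy_arn)["Versions"]
    for version in versions:
        # The default version cannot (and should not) be deleted.
        if not version["IsDefaultVersion"]:
            iam.delete_policy_version(
                PolicyArn=policy_arn, VersionId=version["VersionId"]
            )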
+ type: bool + default: false + state: + description: + - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found. + default: present + choices: [ "present", "absent" ] + type: str +notes: + - Support for I(tags) and I(purge_tags) was added in release 7.2.0. + +author: + - "Dan Kozlowski (@dkhenry)" +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Create a policy +- name: Create IAM Managed Policy + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy_description: "A Helpful managed policy" + policy: "{{ lookup('template', 'managed_policy.json.j2') }}" + state: present + +# Update a policy with a new default version +- name: Update an IAM Managed Policy with new default version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: "{{ lookup('file', 'managed_policy_update.json') }}" + state: present + +# Update a policy with a new non default version +- name: Update an IAM Managed Policy with a non default version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Action: "logs:CreateLogGroup" + Resource: "*" + make_default: false + state: present + +# Update a policy and make it the only version and the default version +- name: Update an IAM Managed Policy with default version as the only version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: | + { + "Version": "2012-10-17", + "Statement":[{ + "Effect": "Allow", + "Action": "logs:PutRetentionPolicy", + "Resource": "*" + }] + } + only_version: true + state: present + +# Remove a policy +- name: Remove an existing IAM Managed Policy + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + state: absent +""" + +RETURN = r""" +policy: + description: Returns the basic policy information. When state == absent this will return the value of the removed policy. + returned: success + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the policy. + type: str + sample: "arn:aws:iam::123456789012:policy/ansible-test-12345/ansible-test-12345-policy" + attachment_count: + description: The number of entities (users, groups, and roles) that the policy is attached to. + type: int + sample: "5" + create_date: + description: The date and time, in ISO 8601 date-time format, when the policy was created. + type: str + sample: "2017-02-08T04:36:28+00:00" + default_version_id: + description: The default policy version to use. + type: str + sample: "v1" + description: + description: A friendly description of the policy. + type: str + sample: "My Example Policy" + is_attachable: + description: Specifies whether the policy can be attached to IAM entities. + type: bool + sample: False + path: + description: The path to the policy. + type: str + sample: "/ansible-test-12345/" + permissions_boundary_usage_count: + description: The number of IAM entities (users, groups, and roles) using the policy as a permissions boundary. + type: int + sample: "5" + policy_id: + description: The stable and globally unique string identifying the policy. + type: str + sample: "ANPA12345EXAMPLE12345" + policy_name: + description: The friendly name identifying the policy.
+ type: str + sample: "ansible-test-12345-policy" + tags: + description: A dictionary representing the tags attached to the managed policy. + type: dict + returned: always + sample: {"Env": "Prod"} + update_date: + description: The date and time, in ISO 8601 date-time format, when the policy was last updated. + type: str + sample: "2017-02-08T05:12:13+00:00" +""" + +import json + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_group_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_role_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_user_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_by_arn +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_by_name +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_version +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_entities_for_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_managed_policy_versions +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +@IAMErrorHandler.deletion_error_handler("delete policy version") +def delete_policy_version(arn, version): + client.delete_policy_version(aws_retry=True, PolicyArn=arn, VersionId=version) + + +def _create_policy_version(arn, policy_document): + return client.create_policy_version(aws_retry=True, PolicyArn=arn, PolicyDocument=policy_document)["PolicyVersion"] + + +@IAMErrorHandler.common_error_handler("create policy version") +def create_policy_version(arn, policy_document): + if module.check_mode: + return {} + try: + version = _create_policy_version(arn, policy_document) + # There is a service limit (typically 5) of policy versions. + # + # Rather than assume that it is 5, we'll try to create the policy + # and if that doesn't work, delete the oldest non default policy version + # and try again. 
+ except is_boto3_error_code("LimitExceeded"): + delete_oldest_non_default_version(arn) + version = _create_policy_version(arn, policy_document) + + return version + + +def delete_oldest_non_default_version(arn): + if module.check_mode: + return True + + versions = [v for v in list_iam_managed_policy_versions(client, arn) if not v["IsDefaultVersion"]] + if not versions: + return False + + versions.sort(key=lambda v: v["CreateDate"], reverse=True) + for v in versions[-1:]: + delete_policy_version(arn, v["VersionId"]) + return True + + +# This needs to return policy_version, changed +def get_or_create_policy_version(policy, policy_document): + versions = list_iam_managed_policy_versions(client, policy["Arn"]) + + for v in versions: + document = get_iam_managed_policy_version(client, policy["Arn"], v["VersionId"])["Document"] + + # If the current policy matches the existing one + if not compare_policies(document, json.loads(to_native(policy_document))): + return v, False + + # No existing version so create one + return create_policy_version(policy["Arn"], policy_document), True + + +@IAMErrorHandler.common_error_handler("set default policy version") +def set_if_default(policy, policy_version, is_default): + if not is_default: + return False + if policy_version.get("IsDefaultVersion"): + return False + if module.check_mode: + return True + + client.set_default_policy_version(aws_retry=True, PolicyArn=policy["Arn"], VersionId=policy_version["VersionId"]) + return True + + +def set_if_only(policy, policy_version, is_only): + if not is_only: + return False + versions = [v for v in list_iam_managed_policy_versions(client, policy["Arn"]) if not v["IsDefaultVersion"]] + if not versions: + return False + if module.check_mode: + return True + + for v in versions: + delete_policy_version(policy["Arn"], v["VersionId"]) + + return True + + +def detach_all_entities(policy): + arn = policy["Arn"] + entities = list_iam_entities_for_policy(client, arn) + + if not entities: + return False + + for g in entities["PolicyGroups"]: + detach_iam_group_policy(client, arn, g["GroupName"]) + for u in entities["PolicyUsers"]: + detach_iam_user_policy(client, arn, u["UserName"]) + for r in entities["PolicyRoles"]: + detach_iam_role_policy(client, arn, r["RoleName"]) + + return True + + +@IAMErrorHandler.common_error_handler("create policy") +def create_managed_policy(name, path, policy, description, tags): + if module.check_mode: + module.exit_json(changed=True) + if policy is None: + raise AnsibleIAMError(message="Managed policy would be created but policy parameter is missing") + + params = {"PolicyName": name, "PolicyDocument": policy} + + if path: + params["Path"] = path + if description: + params["Description"] = description + if tags: + params["Tags"] = ansible_dict_to_boto3_tag_list(tags) + + rvalue = client.create_policy(aws_retry=True, **params) + # rvalue is incomplete + new_policy = get_iam_managed_policy_by_arn(client, rvalue["Policy"]["Arn"]) + + module.exit_json(changed=True, policy=normalize_iam_policy(new_policy)) + + +def ensure_path(existing_policy, path): + if path is None: + return False + + existing_path = existing_policy["Path"] + if existing_path == path: + return False + + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + module.warn(f"Unable to update path from '{existing_path}' to '{path}'") + return False + + +def ensure_description(existing_policy, description): + if description is None: + return False + + existing_description = existing_policy.get("Description", "") 
+ if existing_description == description: + return False + + # As of botocore 1.34.3, the APIs don't support updating the Description + module.warn(f"Unable to update description from '{existing_description}' to '{description}'") + return False + + +def ensure_policy_document(existing_policy, policy, default, only): + if policy is None: + return False + policy_version, changed = get_or_create_policy_version(existing_policy, policy) + changed |= set_if_default(existing_policy, policy_version, default) + changed |= set_if_only(existing_policy, policy_version, only) + return changed + + +def ensure_tags(existing_policy, tags, purge_tags): + if tags is None: + return False + + original_tags = boto3_tag_list_to_ansible_dict(existing_policy.get("Tags") or []) + + tags_to_set, tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False + + if module.check_mode: + return True + + if tag_keys_to_unset: + untag_iam_policy(client, existing_policy["Arn"], tag_keys_to_unset) + if tags_to_set: + tag_iam_policy(client, existing_policy["Arn"], tags_to_set) + + return True + + +def update_managed_policy(existing_policy, path, policy, description, default, only, tags, purge_tags): + changed = ensure_path(existing_policy, path) + changed |= ensure_description(existing_policy, description) + changed |= ensure_policy_document(existing_policy, policy, default, only) + changed |= ensure_tags(existing_policy, tags, purge_tags) + + if not changed: + module.exit_json(changed=changed, policy=normalize_iam_policy(existing_policy)) + + # If anything has changed we need to refresh the policy + updated_policy = get_iam_managed_policy_by_arn(client, existing_policy["Arn"]) + module.exit_json(changed=changed, policy=normalize_iam_policy(updated_policy)) + + +def create_or_update_policy(existing_policy): + name = module.params.get("name") + path = module.params.get("path") + description = module.params.get("description") + default = module.params.get("make_default") + only = module.params.get("only_version") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + policy = None + + if module.params.get("policy") is not None: + policy = json.dumps(json.loads(module.params.get("policy"))) + + if existing_policy is None: + create_managed_policy(name, path, policy, description, tags) + else: + update_managed_policy(existing_policy, path, policy, description, default, only, tags, purge_tags) + + +@IAMErrorHandler.deletion_error_handler("delete policy") +def delete_policy(existing_policy): + if not existing_policy: + return False + + arn = existing_policy["Arn"] + if module.check_mode: + return True + + # Detach policy + detach_all_entities(existing_policy) + # Delete Versions + versions = [v for v in list_iam_managed_policy_versions(client, arn) if not v["IsDefaultVersion"]] + for v in versions: + delete_policy_version(arn, v["VersionId"]) + + # Delete policy + client.delete_policy(aws_retry=True, PolicyArn=arn) + return True + + +def main(): + global module + global client + + argument_spec = dict( + name=dict(required=True, aliases=["policy_name"]), + path=dict(aliases=["prefix", "path_prefix"]), + description=dict(aliases=["policy_description"]), + policy=dict(type="json"), + make_default=dict(type="bool", default=True), + only_version=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", 
default=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params.get("name") + state = module.params.get("state") + + identifier_problem = validate_iam_identifiers("policy", name=name) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + existing_policy = get_iam_managed_policy_by_name(client, name) + + try: + if state == "present": + create_or_update_policy(existing_policy) + else: + changed = delete_policy(existing_policy) + module.exit_json(changed=changed, policy=normalize_iam_policy(existing_policy)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py new file mode 100644 index 000000000..e9e6d8e5c --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py @@ -0,0 +1,89 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_mfa_device_info +version_added: 1.0.0 +version_added_collection: community.aws +short_description: List the MFA (Multi-Factor Authentication) devices registered for a user +description: + - List the MFA (Multi-Factor Authentication) devices registered for a user +author: + - Victor Costan (@pwnall) +options: + user_name: + description: + - The name of the user whose MFA devices will be listed + type: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +mfa_devices: + description: The MFA devices registered for the given user + returned: always + type: list + sample: + - enable_date: "2016-03-11T23:25:36+00:00" + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example + - enable_date: "2016-03-11T23:25:37+00:00" + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
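The iam_mfa_device_info module whose documentation begins above is a thin wrapper over a single paginated IAM call. A rough equivalent in plain boto3; the helper name is illustrative:

import boto3


def list_mfa_devices(user_name=None):
    iam = boto3.client("iam")
    # Omitting UserName lists devices for the calling identity.
    kwargs = {"UserName": user_name} if user_name else {}
    paginator = iam.get_paginator("list_mfa_devices")
    return paginator.paginate(**kwargs).build_full_result()["MFADevices"]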
+
+# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
+- name: List MFA devices
+  amazon.aws.iam_mfa_device_info:
+  register: mfa_devices
+
+# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+- name: Assume an existing role
+  community.aws.sts_assume_role:
+    mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+    role_arn: "arn:aws:iam::123456789012:role/someRole"
+    role_session_name: "someRoleSession"
+  register: assumed_role
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_mfa_devices
+from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_mfa_devices
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+
+def list_mfa_devices(connection, module):
+    user_name = module.params.get("user_name")
+    devices = list_iam_mfa_devices(connection, user_name)
+    module.exit_json(changed=False, mfa_devices=normalize_iam_mfa_devices(devices))
+
+
+def main():
+    argument_spec = dict(
+        user_name=dict(required=False, default=None),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    connection = module.client("iam")
+    try:
+        list_mfa_devices(connection, module)
+    except AnsibleIAMError as e:
+        module.fail_json_aws_error(e)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py
new file mode 100644
index 000000000..fe6eb9090
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: iam_password_policy
+version_added: 1.0.0
+version_added_collection: community.aws
+short_description: Update an IAM Password Policy
+description:
+  - Module updates an IAM Password Policy on a given AWS account.
+author:
+  - "Aaron Smith (@slapula)"
+options:
+  state:
+    description:
+      - Specifies the overall state of the password policy.
+    required: true
+    choices: ['present', 'absent']
+    type: str
+  min_pw_length:
+    description:
+      - Minimum password length.
+    default: 6
+    aliases: [minimum_password_length]
+    type: int
+  require_symbols:
+    description:
+      - Require symbols in password.
+    default: false
+    type: bool
+  require_numbers:
+    description:
+      - Require numbers in password.
+    default: false
+    type: bool
+  require_uppercase:
+    description:
+      - Require uppercase letters in password.
+    default: false
+    type: bool
+  require_lowercase:
+    description:
+      - Require lowercase letters in password.
+    default: false
+    type: bool
+  allow_pw_change:
+    description:
+      - Allow users to change their password.
+    default: false
+    type: bool
+    aliases: [allow_password_change]
+  pw_max_age:
+    description:
+      - Maximum age for a password in days. When this option is 0 then passwords
+        do not expire automatically.
+    default: 0
+    aliases: [password_max_age]
+    type: int
+  pw_reuse_prevent:
+    description:
+      - Prevent re-use of passwords.
+    default: 0
+    aliases: [password_reuse_prevent, prevent_reuse]
+    type: int
+  pw_expire:
+    description:
+      - Prevents users from changing an expired password.
+ default: false + type: bool + aliases: [password_expire, expire] +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Password policy for AWS account + amazon.aws.iam_password_policy: + state: present + min_pw_length: 8 + require_symbols: false + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 60 + pw_reuse_prevent: 5 + pw_expire: false +""" + +RETURN = r""" # """ + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +class IAMConnection(object): + def __init__(self, module): + try: + self.connection = module.resource("iam") + self.module = module + except Exception as e: + module.fail_json(msg=f"Failed to connect to AWS: {str(e)}") + + def policy_to_dict(self, policy): + policy_attributes = [ + "allow_users_to_change_password", + "expire_passwords", + "hard_expiry", + "max_password_age", + "minimum_password_length", + "password_reuse_prevention", + "require_lowercase_characters", + "require_numbers", + "require_symbols", + "require_uppercase_characters", + ] + ret = {} + for attr in policy_attributes: + ret[attr] = getattr(policy, attr) + return ret + + def update_password_policy(self, module, policy): + min_pw_length = module.params.get("min_pw_length") + require_symbols = module.params.get("require_symbols") + require_numbers = module.params.get("require_numbers") + require_uppercase = module.params.get("require_uppercase") + require_lowercase = module.params.get("require_lowercase") + allow_pw_change = module.params.get("allow_pw_change") + pw_max_age = module.params.get("pw_max_age") + pw_reuse_prevent = module.params.get("pw_reuse_prevent") + pw_expire = module.params.get("pw_expire") + + update_parameters = dict( + MinimumPasswordLength=min_pw_length, + RequireSymbols=require_symbols, + RequireNumbers=require_numbers, + RequireUppercaseCharacters=require_uppercase, + RequireLowercaseCharacters=require_lowercase, + AllowUsersToChangePassword=allow_pw_change, + HardExpiry=pw_expire, + ) + if pw_reuse_prevent: + update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) + if pw_max_age: + update_parameters.update(MaxPasswordAge=pw_max_age) + + try: + original_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + original_policy = {} + + try: + results = policy.update(**update_parameters) + policy.reload() + updated_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") + + changed = original_policy != updated_policy + return (changed, updated_policy, camel_dict_to_snake_dict(results)) + + def delete_password_policy(self, policy): + try: + results = policy.delete() + except is_boto3_error_code("NoSuchEntity"): + self.module.exit_json(changed=False, task_status={"IAM": "Couldn't find IAM Password Policy"}) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") + return 
camel_dict_to_snake_dict(results) + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + "state": dict(choices=["present", "absent"], required=True), + "min_pw_length": dict(type="int", aliases=["minimum_password_length"], default=6), + "require_symbols": dict(type="bool", default=False), + "require_numbers": dict(type="bool", default=False), + "require_uppercase": dict(type="bool", default=False), + "require_lowercase": dict(type="bool", default=False), + "allow_pw_change": dict(type="bool", aliases=["allow_password_change"], default=False), + "pw_max_age": dict(type="int", aliases=["password_max_age"], default=0), + "pw_reuse_prevent": dict(type="int", aliases=["password_reuse_prevent", "prevent_reuse"], default=0), + "pw_expire": dict(type="bool", aliases=["password_expire", "expire"], default=False), + }, + supports_check_mode=True, + ) + + resource = IAMConnection(module) + policy = resource.connection.AccountPasswordPolicy() + + state = module.params.get("state") + + if state == "present": + (changed, new_policy, update_result) = resource.update_password_policy(module, policy) + module.exit_json(changed=changed, task_status={"IAM": update_result}, policy=new_policy) + + if state == "absent": + delete_result = resource.delete_password_policy(policy) + module.exit_json(changed=True, task_status={"IAM": delete_result}) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py index 8eef40304..fb2d98e08 100644 --- a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py +++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_policy version_added: 5.0.0 @@ -14,7 +12,7 @@ short_description: Manage inline IAM policies for users, groups, and roles description: - Allows uploading or removing inline IAM policies for IAM users, groups or roles. - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), - M(community.aws.iam_group) and M(community.aws.iam_managed_policy) + M(amazon.aws.iam_group) and M(community.aws.iam_managed_policy) - This module was originally added to C(community.aws) in release 1.0.0. options: iam_type: @@ -54,21 +52,21 @@ author: - "Jonathan I. Davila (@defionscode)" - "Dennis Podkovyrin (@sbj-ss)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Advanced example, create two new groups and add a READ-ONLY policy to both # groups. 
- name: Create Two Groups, Mario and Luigi - community.aws.iam_group: + amazon.aws.iam_group: name: "{{ item }}" state: present loop: - - Mario - - Luigi + - Mario + - Luigi register: new_groups - name: Apply READ-ONLY policy to new groups that have been recently created @@ -91,28 +89,30 @@ EXAMPLES = ''' loop: - user: s3_user prefix: s3_user_prefix +""" -''' -RETURN = ''' +RETURN = r""" policy_names: description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). returned: always type: list elements: str -''' +""" import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry class PolicyError(Exception): @@ -120,7 +120,6 @@ class PolicyError(Exception): class Policy: - def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode): self.client = client self.name = name @@ -136,24 +135,24 @@ class Policy: @staticmethod def _iam_type(): - return '' + return "" def _list(self, name): return {} def list(self): try: - return self._list(self.name).get('PolicyNames', []) - except is_boto3_error_code('AccessDenied'): + return self._list(self.name).get("PolicyNames", []) + except is_boto3_error_code("AccessDenied"): return [] def _get(self, name, policy_name): - return '{}' + return "{}" def get(self, policy_name): try: - return self._get(self.name, policy_name)['PolicyDocument'] - except is_boto3_error_code('AccessDenied'): + return self._get(self.name, policy_name)["PolicyDocument"] + except is_boto3_error_code("AccessDenied"): return {} def _put(self, name, policy_name, policy_doc): @@ -190,7 +189,7 @@ class Policy: if self.policy_json is not None: return self.get_policy_from_json() except json.JSONDecodeError as e: - raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) + raise PolicyError(f"Failed to decode the policy as valid JSON: {str(e)}") return None def get_policy_from_json(self): @@ -226,16 +225,16 @@ class Policy: self.updated_policies[self.policy_name] = policy_doc def run(self): - if self.state == 'present': + if self.state == "present": self.create() - elif self.state == 'absent': + elif self.state == "absent": self.delete() return { - 'changed': self.changed, - self._iam_type() + '_name': self.name, - 'policies': self.list(), - 'policy_names': self.list(), - 'diff': dict( + "changed": self.changed, + self._iam_type() + "_name": self.name, + "policies": self.list(), + "policy_names": self.list(), + "diff": dict( before=self.original_policies, after=self.updated_policies, ), @@ -243,10 +242,9 @@ class Policy: class UserPolicy(Policy): - @staticmethod def _iam_type(): - return 'user' + return "user" def _list(self, name): return self.client.list_user_policies(aws_retry=True, UserName=name) @@ -255,17 
+253,18 @@ class UserPolicy(Policy): return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_user_policy( + aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) class RolePolicy(Policy): - @staticmethod def _iam_type(): - return 'role' + return "role" def _list(self, name): return self.client.list_role_policies(aws_retry=True, RoleName=name) @@ -274,17 +273,18 @@ class RolePolicy(Policy): return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_role_policy( + aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) class GroupPolicy(Policy): - @staticmethod def _iam_type(): - return 'group' + return "group" def _list(self, name): return self.client.list_group_policies(aws_retry=True, GroupName=name) @@ -293,7 +293,9 @@ class GroupPolicy(Policy): return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_group_policy( + aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) @@ -301,44 +303,46 @@ class GroupPolicy(Policy): def main(): argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), - state=dict(default='present', choices=['present', 'absent']), + iam_type=dict(required=True, choices=["user", "group", "role"]), + state=dict(default="present", choices=["present", "absent"]), iam_name=dict(required=True), policy_name=dict(required=True), - policy_json=dict(type='json', default=None, required=False), - skip_duplicates=dict(type='bool', default=False, required=False) + policy_json=dict(type="json", default=None, required=False), + skip_duplicates=dict(type="bool", default=False, required=False), ) required_if = [ - ('state', 'present', ('policy_json',), True), + ("state", "present", ("policy_json",), True), ] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), - policy_json=module.params.get('policy_json'), - skip_duplicates=module.params.get('skip_duplicates'), - state=module.params.get('state'), + client=module.client("iam", retry_decorator=AWSRetry.jittered_backoff()), + name=module.params.get("iam_name"), + policy_name=module.params.get("policy_name"), + 
policy_json=module.params.get("policy_json"), + skip_duplicates=module.params.get("skip_duplicates"), + state=module.params.get("state"), check_mode=module.check_mode, ) - iam_type = module.params.get('iam_type') + iam_type = module.params.get("iam_type") try: - if iam_type == 'user': + if iam_type == "user": policy = UserPolicy(**args) - elif iam_type == 'role': + elif iam_type == "role": policy = RolePolicy(**args) - elif iam_type == 'group': + elif iam_type == "group": policy = GroupPolicy(**args) - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.", - date='2024-08-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are" + " returned for now." + ), + date="2024-08-01", + collection_name="amazon.aws", + ) module.exit_json(**(policy.run())) except (BotoCoreError, ClientError) as e: @@ -347,5 +351,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py index 125f55e1f..3e0e4eaaa 100644 --- a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_policy_info version_added: 5.0.0 @@ -34,13 +32,12 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Describe all inline IAM policies on an IAM User amazon.aws.iam_policy_info: iam_type: user @@ -51,9 +48,9 @@ EXAMPLES = ''' iam_type: role iam_name: example_role policy_name: example_policy +""" -''' -RETURN = ''' +RETURN = r""" policies: description: A list containing the matching IAM inline policy names and their data returned: success @@ -75,20 +72,19 @@ all_policy_names: description: A list of names of all of the IAM inline policies on the queried object returned: success type: list -''' +""" try: import botocore except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry class Policy: - def __init__(self, client, name, policy_name): self.client = client self.name = name @@ -97,19 +93,19 @@ class Policy: @staticmethod def _iam_type(): - return '' + return "" def _list(self, name): return {} def list(self): - return 
self._list(self.name).get('PolicyNames', []) + return self._list(self.name).get("PolicyNames", []) def _get(self, name, policy_name): - return '{}' + return "{}" def get(self, policy_name): - return self._get(self.name, policy_name)['PolicyDocument'] + return self._get(self.name, policy_name)["PolicyDocument"] def get_all(self): policies = list() @@ -119,27 +115,20 @@ class Policy: def run(self): policy_list = self.list() - ret_val = { - 'changed': False, - self._iam_type() + '_name': self.name, - 'all_policy_names': policy_list - } + ret_val = {"changed": False, self._iam_type() + "_name": self.name, "all_policy_names": policy_list} if self.policy_name is None: ret_val.update(policies=self.get_all()) ret_val.update(policy_names=policy_list) elif self.policy_name in policy_list: - ret_val.update(policies=[{ - "policy_name": self.policy_name, - "policy_document": self.get(self.policy_name)}]) + ret_val.update(policies=[{"policy_name": self.policy_name, "policy_document": self.get(self.policy_name)}]) ret_val.update(policy_names=[self.policy_name]) return ret_val class UserPolicy(Policy): - @staticmethod def _iam_type(): - return 'user' + return "user" def _list(self, name): return self.client.list_user_policies(aws_retry=True, UserName=name) @@ -149,10 +138,9 @@ class UserPolicy(Policy): class RolePolicy(Policy): - @staticmethod def _iam_type(): - return 'role' + return "role" def _list(self, name): return self.client.list_role_policies(aws_retry=True, RoleName=name) @@ -162,10 +150,9 @@ class RolePolicy(Policy): class GroupPolicy(Policy): - @staticmethod def _iam_type(): - return 'group' + return "group" def _list(self, name): return self.client.list_group_policies(aws_retry=True, GroupName=name) @@ -176,7 +163,7 @@ class GroupPolicy(Policy): def main(): argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), + iam_type=dict(required=True, choices=["user", "group", "role"]), iam_name=dict(required=True), policy_name=dict(default=None, required=False), ) @@ -184,26 +171,29 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), + client=module.client("iam", retry_decorator=AWSRetry.jittered_backoff()), + name=module.params.get("iam_name"), + policy_name=module.params.get("policy_name"), ) - iam_type = module.params.get('iam_type') + iam_type = module.params.get("iam_type") try: - if iam_type == 'user': + if iam_type == "user": policy = UserPolicy(**args) - elif iam_type == 'role': + elif iam_type == "role": policy = RolePolicy(**args) - elif iam_type == 'group': + elif iam_type == "group": policy = GroupPolicy(**args) module.exit_json(**(policy.run())) - except is_boto3_error_code('NoSuchEntity') as e: - module.exit_json(changed=False, msg=e.response['Error']['Message']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchEntity") as e: + module.exit_json(changed=False, msg=e.response["Error"]["Message"]) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role.py b/ansible_collections/amazon/aws/plugins/modules/iam_role.py 
new file mode 100644
index 000000000..a7da38c31
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
@@ -0,0 +1,694 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: iam_role
+version_added: 1.0.0
+version_added_collection: community.aws
+short_description: Manage AWS IAM roles
+description:
+  - Manage AWS IAM roles.
+author:
+  - "Rob White (@wimnat)"
+options:
+  path:
+    description:
+      - The path of the role.
+      - For more information about IAM paths, see the AWS IAM identifiers documentation
+        U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+      - Updating the path on an existing role is not currently supported and will result in a
+        warning.
+      - C(path_prefix) and C(prefix) were added as aliases in release 7.2.0.
+    type: str
+    aliases: ["prefix", "path_prefix"]
+  name:
+    description:
+      - The name of the role.
+      - >-
+        Note: Role names are unique within an account. Paths (I(path)) do B(not) affect
+        the uniqueness requirements of I(name). For example it is not permitted to have both
+        C(/Path1/MyRole) and C(/Path2/MyRole) in the same account.
+      - C(role_name) was added as an alias in release 7.2.0.
+    required: true
+    type: str
+    aliases: ["role_name"]
+  description:
+    description:
+      - Provides a description of the role.
+    type: str
+  boundary:
+    description:
+      - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+      - Boundaries cannot be set on Instance Profiles; as such, if this option is specified then I(create_instance_profile) must be C(false).
+      - This is intended for roles/users that have permissions to create new IAM objects.
+      - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+    aliases: [boundary_policy_arn]
+    type: str
+  assume_role_policy_document:
+    description:
+      - The trust relationship policy document that grants an entity permission to assume the role.
+      - This parameter is required when I(state=present).
+    type: json
+  managed_policies:
+    description:
+      - A list of managed policy ARNs or friendly names.
+      - To remove all policies set I(purge_policies=true) and I(managed_policies=[]).
+      - To embed an inline policy, use M(amazon.aws.iam_policy).
+    aliases: ['managed_policy']
+    type: list
+    elements: str
+  max_session_duration:
+    description:
+      - The maximum duration (in seconds) of a session when assuming the role.
+      - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+    type: int
+  purge_policies:
+    description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+    type: bool
+    aliases: ['purge_policy', 'purge_managed_policies']
+    default: true
+  state:
+    description:
+      - Create or remove the IAM role.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  create_instance_profile:
+    description:
+      - Creates an IAM instance profile along with the role.
+    default: true
+    type: bool
+  delete_instance_profile:
+    description:
+      - When I(delete_instance_profile=true) and I(state=absent), deleting a role will also delete the instance
+        profile created with the same I(name) as the role.
+      - Only applies when I(state=absent).
+ default: false + type: bool + wait_timeout: + description: + - How long (in seconds) to wait for creation / update to complete. + default: 120 + type: int + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for IAM role creation before returning. + default: True + type: bool +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a role with description and tags + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + description: This is My New Role + tags: + env: dev + +- name: "Create a role and attach a managed policy called 'PowerUserAccess'" + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: + - arn:aws:iam::aws:policy/PowerUserAccess + +- name: Keep the role created above but remove all managed policies + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: [] + +- name: Delete the role + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" + state: absent +""" + +RETURN = r""" +iam_role: + description: dictionary containing the IAM Role data + returned: success + type: complex + contains: + path: + description: the path to the role + type: str + returned: always + sample: / + role_name: + description: the friendly name that identifies the role + type: str + returned: always + sample: myrole + role_id: + description: the stable and unique string identifying the role + type: str + returned: always + sample: ABCDEFF4EZ4ABCDEFV4ZC + arn: + description: the Amazon Resource Name (ARN) specifying the role + type: str + returned: always + sample: "arn:aws:iam::1234567890:role/mynewrole" + create_date: + description: the date and time, in ISO 8601 date-time format, when the role was created + type: str + returned: always + sample: "2016-08-14T04:36:28+00:00" + assume_role_policy_document: + description: + - the policy that grants an entity permission to assume the role + - | + note: the case of keys in this dictionary are currently converted from CamelCase to + snake_case. 
In a release after 2023-12-01 this behaviour will change + type: dict + returned: always + sample: { + 'statement': [ + { + 'action': 'sts:AssumeRole', + 'effect': 'Allow', + 'principal': { + 'service': 'ec2.amazonaws.com' + }, + 'sid': '' + } + ], + 'version': '2012-10-17' + } + assume_role_policy_document_raw: + description: the policy that grants an entity permission to assume the role + type: dict + returned: always + version_added: 5.3.0 + sample: { + 'Statement': [ + { + 'Action': 'sts:AssumeRole', + 'Effect': 'Allow', + 'Principal': { + 'Service': 'ec2.amazonaws.com' + }, + 'Sid': '' + } + ], + 'Version': '2012-10-17' + } + + attached_policies: + description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role + type: list + returned: always + sample: [ + { + 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess', + 'policy_name': 'PowerUserAccess' + } + ] + tags: + description: role tags + type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + +import json + +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns +from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_attached_policies +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +@IAMErrorHandler.common_error_handler("wait for role creation") +def wait_iam_exists(client, check_mode, role_name, wait, wait_timeout): + if check_mode or wait: + return + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + waiter = client.get_waiter("role_exists") + waiter.wait( + WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}, + RoleName=role_name, + ) + + +def attach_policies(client, check_mode, policies_to_attach, role_name): + if not policies_to_attach: + return False + if check_mode: + return True + + for policy_arn in policies_to_attach: + IAMErrorHandler.common_error_handler(f"attach policy {policy_arn} to role")(client.attach_role_policy)( + RoleName=role_name, 
PolicyArn=policy_arn, aws_retry=True + ) + return True + + +def remove_policies(client, check_mode, policies_to_remove, role_name): + if not policies_to_remove: + return False + if check_mode: + return True + + for policy in policies_to_remove: + IAMErrorHandler.deletion_error_handler(f"detach policy {policy} from role")(client.detach_role_policy)( + RoleName=role_name, PolicyArn=policy, aws_retry=True + ) + return True + + +def remove_inline_policies(client, role_name): + current_inline_policies = get_inline_policy_list(client, role_name) + for policy in current_inline_policies: + IAMErrorHandler.deletion_error_handler(f"delete policy {policy} embedded in role")(client.delete_role_policy)( + RoleName=role_name, PolicyName=policy, aws_retry=True + ) + + +def generate_create_params(module): + params = dict() + params["Path"] = module.params.get("path") or "/" + params["RoleName"] = module.params.get("name") + params["AssumeRolePolicyDocument"] = module.params.get("assume_role_policy_document") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("max_session_duration") is not None: + params["MaxSessionDuration"] = module.params.get("max_session_duration") + if module.params.get("boundary") is not None: + params["PermissionsBoundary"] = module.params.get("boundary") + if module.params.get("tags") is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) + + return params + + +@IAMErrorHandler.common_error_handler("create role") +def create_basic_role(module, client): + """ + Perform the Role creation. + Assumes tests for the role existing have already been performed. + """ + if module.check_mode: + module.exit_json(changed=True) + + params = generate_create_params(module) + role = client.create_role(aws_retry=True, **params) + # 'Description' is documented as a key of the role returned by create_role + # but appears to be an AWS bug (the value is not returned using the AWS CLI either). + # Get the role after creating it. + # nb. 
doesn't use get_iam_role because we need to retry if the Role isn't there + role = _get_role_with_backoff(client, params["RoleName"]) + + return role + + +@IAMErrorHandler.common_error_handler("update assume role policy for role") +def update_role_assumed_policy(client, check_mode, role_name, target_assumed_policy, current_assumed_policy): + # Check Assumed Policy document + if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)): + return False + if check_mode: + return True + + client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update description for role") +def update_role_description(client, check_mode, role_name, target_description, current_description): + # Check Description update + if target_description is None or current_description == target_description: + return False + if check_mode: + return True + + client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update maximum session duration for role") +def update_role_max_session_duration(client, check_mode, role_name, target_duration, current_duration): + # Check MaxSessionDuration update + if target_duration is None or current_duration == target_duration: + return False + if check_mode: + return True + + client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update permission boundary for role") +def _put_role_permissions_boundary(client, **params): + client.put_role_permissions_boundary(aws_retry=True, **params) + + +@IAMErrorHandler.deletion_error_handler("remove permission boundary from role") +def _delete_role_permissions_boundary(client, **params): + client.delete_role_permissions_boundary(**params) + + +def update_role_permissions_boundary(client, check_mode, role_name, permissions_boundary, current_permissions_boundary): + # Check PermissionsBoundary + if permissions_boundary is None or permissions_boundary == current_permissions_boundary: + return False + if check_mode: + return True + + if permissions_boundary == "": + _delete_role_permissions_boundary(client, RoleName=role_name) + else: + _put_role_permissions_boundary(client, RoleName=role_name, PermissionsBoundary=permissions_boundary) + return True + + +def update_managed_policies(client, check_mode, role_name, managed_policies, purge_policies): + # Check Managed Policies + if managed_policies is None: + return False + + # Get list of current attached managed policies + current_attached_policies = list_iam_role_attached_policies(client, role_name) + current_attached_policies_arn_list = [policy["PolicyArn"] for policy in current_attached_policies] + + if len(managed_policies) == 1 and managed_policies[0] is None: + managed_policies = [] + + policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies) + policies_to_remove = policies_to_remove if purge_policies else [] + policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list) + + changed = False + if purge_policies and policies_to_remove: + if check_mode: + return True + else: + changed |= remove_policies(client, check_mode, policies_to_remove, role_name) + + if policies_to_attach: + if check_mode: + return True + else: + changed |= attach_policies(client, check_mode, policies_to_attach, role_name) + + return changed 
+ + +def update_basic_role(module, client, role_name, role): + check_mode = module.check_mode + assumed_policy = module.params.get("assume_role_policy_document") + description = module.params.get("description") + duration = module.params.get("max_session_duration") + path = module.params.get("path") + permissions_boundary = module.params.get("boundary") + purge_tags = module.params.get("purge_tags") + tags = module.params.get("tags") + + # current attributes + current_assumed_policy = role.get("AssumeRolePolicyDocument") + current_description = role.get("Description") + current_duration = role.get("MaxSessionDuration") + current_permissions_boundary = role.get("PermissionsBoundary", {}).get("PermissionsBoundaryArn", "") + current_tags = role.get("Tags", []) + + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + if update_role_path(client, check_mode, role, path): + module.warn( + "iam_role doesn't support updating the path: " f"current path '{role.get('Path')}', requested path '{path}'" + ) + + changed = False + + # Update attributes + changed |= update_role_tags(client, check_mode, role_name, tags, purge_tags, current_tags) + changed |= update_role_assumed_policy(client, check_mode, role_name, assumed_policy, current_assumed_policy) + changed |= update_role_description(client, check_mode, role_name, description, current_description) + changed |= update_role_max_session_duration(client, check_mode, role_name, duration, current_duration) + changed |= update_role_permissions_boundary( + client, check_mode, role_name, permissions_boundary, current_permissions_boundary + ) + + return changed + + +def create_or_update_role(module, client): + check_mode = module.check_mode + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + role_name = module.params.get("name") + create_instance_profile = module.params.get("create_instance_profile") + path = module.params.get("path") + purge_policies = module.params.get("purge_policies") + managed_policies = module.params.get("managed_policies") + if managed_policies: + # Attempt to list the policies early so we don't leave things behind if we can't find them. 
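+        # (If a name can't be resolved to an ARN this fails the task up
+        # front, before any create/update calls are made.)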
+        managed_policies = convert_managed_policy_names_to_arns(client, managed_policies)
+
+    changed = False
+
+    # Get role
+    role = get_iam_role(client, role_name)
+
+    # If role is None, create it
+    if role is None:
+        role = create_basic_role(module, client)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+        changed = True
+    else:
+        changed = update_basic_role(module, client, role_name, role)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    if create_instance_profile:
+        changed |= create_instance_profiles(client, check_mode, role_name, path)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    changed |= update_managed_policies(client, module.check_mode, role_name, managed_policies, purge_policies)
+    wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    # Get the role again
+    role = get_iam_role(client, role_name)
+    role["AttachedPolicies"] = list_iam_role_attached_policies(client, role_name)
+    camel_role = normalize_iam_role(role, _v7_compat=True)
+
+    module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
+
+
+def create_instance_profiles(client, check_mode, role_name, path):
+    # Fetch existing Profiles
+    instance_profiles = list_iam_instance_profiles(client, role=role_name)
+
+    # Profile already exists
+    if any(p["InstanceProfileName"] == role_name for p in instance_profiles):
+        return False
+
+    if check_mode:
+        return True
+
+    path = path or "/"
+    # Make sure an instance profile is created
+    create_iam_instance_profile(client, role_name, path, {})
+    add_role_to_iam_instance_profile(client, role_name, role_name)
+
+    return True
+
+
+def remove_instance_profiles(client, check_mode, role_name, delete_instance_profile):
+    """Removes the role from instance profiles and deletes the instance profile if
+    delete_instance_profile is set
+    """
+
+    instance_profiles = list_iam_instance_profiles(client, role=role_name)
+    if not instance_profiles:
+        return False
+    if check_mode:
+        return True
+
+    # Remove the role from the instance profile(s)
+    for profile in instance_profiles:
+        profile_name = profile["InstanceProfileName"]
+        remove_role_from_iam_instance_profile(client, profile_name, role_name)
+        if not delete_instance_profile:
+            continue
+        # Delete the instance profile if the role and profile names match
+        if profile_name == role_name:
+            delete_iam_instance_profile(client, profile_name)
+
+
+@IAMErrorHandler.deletion_error_handler("delete role")
+def destroy_role(client, check_mode, role_name, delete_profiles):
+    role = get_iam_role(client, role_name)
+
+    if role is None:
+        return False
+
+    if check_mode:
+        return True
+
+    # Before we try to delete the role we need to remove any
+    # - attached instance profiles
+    # - attached managed policies
+    # - embedded inline policies
+    remove_instance_profiles(client, check_mode, role_name, delete_profiles)
+    update_managed_policies(client, check_mode, role_name, [], True)
+    remove_inline_policies(client, role_name)
+
+    client.delete_role(aws_retry=True, RoleName=role_name)
+    return True
+
+
+@IAMErrorHandler.common_error_handler("get role")
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["NoSuchEntity"])
+def _get_role_with_backoff(client, name):
+    return client.get_role(RoleName=name)["Role"]
+
+
+@IAMErrorHandler.list_error_handler("list attached inline policies for role")
+def get_inline_policy_list(client, name):
+    return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"]
+
+
+def update_role_path(client, check_mode, role, path):
+    if path is
None: + return False + if path == role.get("Path"): + return False + if check_mode: + return True + + # Not currently supported by the APIs + pass + return True + + +@IAMErrorHandler.common_error_handler("set tags for role") +def update_role_tags(client, check_mode, role_name, new_tags, purge_tags, existing_tags): + if new_tags is None: + return False + existing_tags = boto3_tag_list_to_ansible_dict(existing_tags) + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) + if not tags_to_remove and not tags_to_add: + return False + if check_mode: + return True + + if tags_to_remove: + client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True) + if tags_to_add: + client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) + + return True + + +def validate_params(module): + if module.params.get("boundary"): + if module.params.get("create_instance_profile"): + module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.") + if not validate_aws_arn(module.params.get("boundary"), service="iam"): + module.fail_json(msg="Boundary policy must be an ARN") + if module.params.get("max_session_duration"): + max_session_duration = module.params.get("max_session_duration") + if max_session_duration < 3600 or max_session_duration > 43200: + module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)") + + identifier_problem = validate_iam_identifiers( + "role", name=module.params.get("name"), path=module.params.get("path") + ) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + +def main(): + argument_spec = dict( + name=dict(type="str", aliases=["role_name"], required=True), + path=dict(type="str", aliases=["path_prefix", "prefix"]), + assume_role_policy_document=dict(type="json"), + managed_policies=dict(type="list", aliases=["managed_policy"], elements="str"), + max_session_duration=dict(type="int"), + state=dict(type="str", choices=["present", "absent"], default="present"), + description=dict(type="str"), + boundary=dict(type="str", aliases=["boundary_policy_arn"]), + create_instance_profile=dict(type="bool", default=True), + delete_instance_profile=dict(type="bool", default=False), + purge_policies=dict(default=True, type="bool", aliases=["purge_policy", "purge_managed_policies"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + wait_timeout=dict(default=120, type="int"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["assume_role_policy_document"])], + supports_check_mode=True, + ) + + module.deprecate( + "All return values other than iam_role and changed have been deprecated and " + "will be removed in a release after 2023-12-01.", + date="2023-12-01", + collection_name="amazon.aws", + ) + module.deprecate( + "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document " + "will no longer be converted from CamelCase to snake_case. 
The " + "iam_role.assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="amazon.aws", + ) + + validate_params(module) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + role_name = module.params.get("name") + delete_profiles = module.params.get("delete_instance_profile") + + try: + if state == "present": + create_or_update_role(module, client) + elif state == "absent": + changed = destroy_role(client, module.check_mode, role_name, delete_profiles) + module.exit_json(changed=changed) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py new file mode 100644 index 000000000..e77689878 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_role_info +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Gather information on IAM roles +description: + - Gathers information about IAM roles. +author: + - "Will Thames (@willthames)" +options: + name: + description: + - Name of a role to search for. + - Mutually exclusive with I(path_prefix). + aliases: + - role_name + type: str + path_prefix: + description: + - Prefix of role to restrict IAM role search for. + - Mutually exclusive with I(name). + - C(path) and C(prefix) were added as aliases in release 7.2.0. + - In a release after 2026-05-01 paths must begin and end with C(/). + Prior to this paths will automatically have C(/) added as appropriate + to ensure that they start and end with C(/). + type: str + aliases: ["path", "prefix"] +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: find all existing IAM roles + amazon.aws.iam_role_info: + register: result + +- name: describe a single role + amazon.aws.iam_role_info: + name: MyIAMRole + +- name: describe all roles matching a path prefix + amazon.aws.iam_role_info: + path_prefix: /application/path/ +""" + +RETURN = r""" +iam_roles: + description: List of IAM roles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for IAM role. + returned: always + type: str + sample: arn:aws:iam::123456789012:role/AnsibleTestRole + assume_role_policy_document: + description: + - The policy that grants an entity permission to assume the role + - | + Note: the case of keys in this dictionary are currently converted from CamelCase to + snake_case. In a release after 2023-12-01 this behaviour will change. + returned: always + type: dict + assume_role_policy_document_raw: + description: The policy document describing what can assume the role. + returned: always + type: dict + version_added: 5.3.0 + create_date: + description: Date IAM role was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + inline_policies: + description: List of names of inline policies. + returned: always + type: list + sample: [] + managed_policies: + description: List of attached managed policies. 
+ returned: always + type: complex + contains: + policy_arn: + description: Amazon Resource Name for the policy. + returned: always + type: str + sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy + policy_name: + description: Name of managed policy. + returned: always + type: str + sample: AnsibleTestEC2Policy + instance_profiles: + description: List of attached instance profiles. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + path: + description: Path of role. + returned: always + type: str + sample: / + role_id: + description: Amazon Identifier for the role. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + role_name: + description: Name of the role. + returned: always + type: str + sample: AnsibleTestRole + tags: + description: Role tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_attached_policies +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_policies +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_roles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def expand_iam_role(client, role): + name = role["RoleName"] + role["InlinePolicies"] = list_iam_role_policies(client, name) + role["ManagedPolicies"] = list_iam_role_attached_policies(client, name) + role["InstanceProfiles"] = list_iam_instance_profiles(client, role=name) + return role + + +def describe_iam_roles(client, name, path_prefix): + if name: + roles = [get_iam_role(client, name)] + else: + roles = list_iam_roles(client, path=path_prefix) + roles = [r for r in roles if r is not None] + return [normalize_iam_role(expand_iam_role(client, role), _v7_compat=True) for role in roles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["role_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + name = 
module.params["name"]
+    path_prefix = module.params["path_prefix"]
+
+    module.deprecate(
+        "In a release after 2023-12-01 the contents of assume_role_policy_document "
+        "will no longer be converted from CamelCase to snake_case. The "
+        ".assume_role_policy_document_raw return value already returns the "
+        "policy document in this future format.",
+        date="2023-12-01",
+        collection_name="amazon.aws",
+    )
+
+    # Once the deprecation is over we can merge this into a single call to validate_iam_identifiers
+    if name:
+        validation_error = validate_iam_identifiers("role", name=name)
+        if validation_error:
+            module.fail_json(msg=validation_error)
+    if path_prefix:
+        validation_error = validate_iam_identifiers("role", path=path_prefix)
+        if validation_error:
+            _prefix = "/" if not path_prefix.startswith("/") else ""
+            _suffix = "/" if not path_prefix.endswith("/") else ""
+            path_prefix = f"{_prefix}{path_prefix}{_suffix}"
+            module.deprecate(
+                "In a release after 2026-05-01 paths must begin and end with /. "
+                f"path_prefix has been modified to '{path_prefix}'",
+                date="2026-05-01",
+                collection_name="amazon.aws",
+            )
+
+    try:
+        module.exit_json(changed=False, iam_roles=describe_iam_roles(client, name, path_prefix))
+    except AnsibleIAMError as e:
+        module.fail_json_aws_error(e)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user.py b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
index a4e056c0e..70231d794 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_user.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
@@ -1,28 +1,53 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: iam_user
 version_added: 5.0.0
 short_description: Manage AWS IAM users
 description:
   - A module to manage AWS IAM users.
-  - The module does not manage groups that users belong to, groups memberships can be managed using M(community.aws.iam_group).
+  - The module does not manage the groups that users belong to; group memberships can be managed using M(amazon.aws.iam_group).
   - This module was originally added to C(community.aws) in release 1.0.0.
 author:
   - Josh Souza (@joshsouza)
 options:
   name:
     description:
-      - The name of the user to create.
+      - The name of the user.
+      - >-
+        Note: user names are unique within an account. Paths (I(path)) do B(not) affect
+        the uniqueness requirements of I(name). For example it is not permitted to have both
+        C(/Path1/MyUser) and C(/Path2/MyUser) in the same account.
+      - C(user_name) was added as an alias in release 7.2.0.
     required: true
     type: str
+    aliases: ['user_name']
+  path:
+    description:
+      - The path for the user.
+      - For more information about IAM paths, see the AWS IAM identifiers documentation
+        U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+    aliases: ['prefix', 'path_prefix']
+    required: false
+    type: str
+    version_added: 7.2.0
+  boundary:
+    description:
+      - The ARN of an IAM managed policy to apply as a boundary policy for this user.
+      - Boundary policies can be used to restrict the permissions a user can exercise, but do not
+        grant any permissions in and of themselves.
+      - For more information on boundaries, see
+        U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+      - Set to the empty string C("") to remove the boundary policy.
+    aliases: ["boundary_policy_arn", "permissions_boundary"]
+    required: false
+    type: str
+    version_added: 7.2.0
   password:
     description:
       - The password to apply to the user.
@@ -32,7 +57,8 @@ options:
     version_added_collection: community.aws
   password_reset_required:
     description:
-      - Defines if the user is required to set a new password after login.
+      - Defines if the user is required to set a new password when they log in.
+      - Ignored unless a new password is set.
     required: false
     type: bool
     default: false
@@ -61,8 +87,8 @@ options:
       - To embed an inline policy, use M(community.aws.iam_policy).
     required: false
     type: list
-    elements: str
     default: []
+    elements: str
     aliases: ['managed_policy']
   state:
     description:
@@ -95,16 +121,16 @@ options:
 notes:
   - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
-'''
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 # Note: This module does not allow management of groups that users belong to.
-# Groups should manage their membership directly using community.aws.iam_group,
+# Groups should manage their membership directly using amazon.aws.iam_group,
 # as users belong to them.

 - name: Create a user
@@ -142,9 +168,9 @@ EXAMPLES = r'''
   amazon.aws.iam_user:
     name: testuser1
     state: absent
+"""

-'''
-RETURN = r'''
+RETURN = r"""
 user:
     description: dictionary containing all the user information
     returned: success
@@ -175,407 +201,604 @@ user:
             type: dict
             returned: always
             sample: {"Env": "Prod"}
-'''
-
-try:
-    import botocore
-except ImportError:
-    pass  # caught by AnsibleAWSModule
+        attached_policies:
+            version_added: 7.2.0
+            description:
+                - list containing basic information about managed policies attached to the user.
+            returned: success
+            type: complex
+            contains:
+                policy_arn:
+                    description: the Amazon Resource Name (ARN) specifying the managed policy.
+                    type: str
+                    sample: "arn:aws:iam::123456789012:policy/test_policy"
+                policy_name:
+                    description: the friendly name that identifies the policy.
+ type: str + sample: test_policy +""" from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_user +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_user +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +@IAMErrorHandler.common_error_handler("wait for IAM user creation") +def _wait_user_exists(connection, **params): + waiter = connection.get_waiter("user_exists") + waiter.wait(**params) + + +def wait_iam_exists(connection, module): + if not module.params.get("wait"): + return + + user_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + waiter_config = {"Delay": delay, "MaxAttempts": max_attempts} + + _wait_user_exists(connection, WaiterConfig=waiter_config, UserName=user_name) + + +@IAMErrorHandler.common_error_handler("create user") +def create_user(connection, module, user_name, path, boundary, tags): + params = {"UserName": user_name} + if path: + params["Path"] = path + if boundary: + params["PermissionsBoundary"] = boundary + if tags: + params["Tags"] = ansible_dict_to_boto3_tag_list(tags) + + if module.check_mode: + module.exit_json(changed=True, create_params=params) + + user = connection.create_user(aws_retry=True, **params)["User"] + + return normalize_iam_user(user) + + +@IAMErrorHandler.common_error_handler("create user login profile") +def _create_login_profile(connection, **params): + return connection.create_login_profile(aws_retry=True, **params) + + +# Uses the list error handler because we "update" as a quick test for existence +# when our next step would be update or create. 
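+# With the list handler a missing login profile is returned as None rather than
+# raising an exception, letting the caller fall back to creating the profile.
+# Roughly, the flow implemented by _create_or_update_login_profile below is:
+#
+#   retval = _update_login_profile(connection, **user_params)  # None if no profile exists yet
+#   if not retval:
+#       retval = _create_login_profile(connection, **user_params)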
+@IAMErrorHandler.list_error_handler("update user login profile") +def _update_login_profile(connection, **params): + return connection.update_login_profile(aws_retry=True, **params) + + +def _create_or_update_login_profile(connection, name, password, reset): + # Apply new password / update password for the user + user_params = { + "UserName": name, + "Password": password, + "PasswordResetRequired": reset, + } + + retval = _update_login_profile(connection, **user_params) + if retval: + return retval + return _create_login_profile(connection, **user_params) -def compare_attached_policies(current_attached_policies, new_attached_policies): +def ensure_login_profile(connection, check_mode, user_name, password, update, reset, new_user): + if password is None: + return False, None + if update == "on_create" and not new_user: + return False, None - # If new_attached_policies is None it means we want to remove all policies - if len(current_attached_policies) > 0 and new_attached_policies is None: + if check_mode: + return True, None + + return True, _create_or_update_login_profile(connection, user_name, password, reset) + + +@IAMErrorHandler.list_error_handler("get login profile") +def _get_login_profile(connection, name): + return connection.get_login_profile(aws_retry=True, UserName=name).get("LoginProfile") + + +@IAMErrorHandler.deletion_error_handler("delete login profile") +def _delete_login_profile(connection, name): + connection.delete_login_profile(aws_retry=True, UserName=name) + + +def remove_login_profile(connection, check_mode, user_name, remove_password, new_user): + if new_user: + return False + if not remove_password: return False - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + # In theory we could skip this check outside check_mode + login_profile = _get_login_profile(connection, user_name) + if not login_profile: + return False - if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)): + if check_mode: return True - else: - return False + _delete_login_profile(connection, user_name) + return True -def convert_friendly_names_to_arns(connection, module, policy_names): - # List comprehension that looks for any policy in the 'policy_names' list - # that does not begin with 'arn'. If there aren't any, short circuit. 
- # If there are, translate friendly name to the full arn - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): - return policy_names - allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] +@IAMErrorHandler.list_error_handler("get policies for user") +def _list_attached_policies(connection, user_name): + return connection.list_attached_user_policies(aws_retry=True, UserName=user_name)["AttachedPolicies"] - for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] - try: - return [allpolicies[policy] for policy in policy_names] - except KeyError as e: - module.fail_json(msg="Couldn't find policy: " + str(e)) +@IAMErrorHandler.common_error_handler("attach policy to user") +def attach_policies(connection, check_mode, user_name, policies): + if not policies: + return False + if check_mode: + return True + for policy_arn in policies: + connection.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) -def wait_iam_exists(connection, module): - user_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') +@IAMErrorHandler.common_error_handler("detach policy from user") +def detach_policies(connection, check_mode, user_name, policies): + if not policies: + return False + if check_mode: + return True + for policy_arn in policies: + connection.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - try: - waiter = connection.get_waiter('user_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - UserName=user_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM user creation') +def ensure_managed_policies(connection, check_mode, user_name, managed_policies, purge_policies): + if managed_policies is None: + return False + managed_policies = convert_managed_policy_names_to_arns(connection, managed_policies) -def create_or_update_login_profile(connection, module): + # Manage managed policies + attached_policies_desc = _list_attached_policies(connection, user_name) + current_attached_policies = [policy["PolicyArn"] for policy in attached_policies_desc] - # Apply new password / update password for the user - user_params = dict() - user_params['UserName'] = module.params.get('name') - user_params['Password'] = module.params.get('password') - user_params['PasswordResetRequired'] = module.params.get('password_reset_required') - retval = {} + policies_to_add = list(set(managed_policies) - set(current_attached_policies)) + policies_to_remove = [] + if purge_policies: + policies_to_remove = list(set(current_attached_policies) - set(managed_policies)) - try: - retval = connection.update_login_profile(**user_params) - except is_boto3_error_code('NoSuchEntity'): - # Login profile does not yet exist - create it - try: - retval = connection.create_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user login profile") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 
msg="Unable to update user login profile") - - return True, retval - - -def delete_login_profile(connection, module): - ''' - Deletes a users login profile. - Parameters: - connection: IAM client - module: AWSModule - Returns: - (bool): True if login profile deleted, False if no login profile found to delete - ''' - user_params = dict() - user_params['UserName'] = module.params.get('name') - - # User does not have login profile - nothing to delete - if not user_has_login_profile(connection, module, user_params['UserName']): + if not policies_to_add and not policies_to_remove: return False - if not module.check_mode: - try: - connection.delete_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete user login profile") + if check_mode: + return True + + detach_policies(connection, check_mode, user_name, policies_to_remove) + attach_policies(connection, check_mode, user_name, policies_to_add) return True -def create_or_update_user(connection, module): +@IAMErrorHandler.common_error_handler("set tags for user") +def ensure_user_tags(connection, check_mode, user, user_name, new_tags, purge_tags): + if new_tags is None: + return False - params = dict() - params['UserName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - purge_policies = module.params.get('purge_policies') + existing_tags = user["tags"] - if module.params.get('tags') is not None: - params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) - changed = False + if not tags_to_remove and not tags_to_add: + return False - if managed_policies: - managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + if check_mode: + return True - # Get user - user = get_user(connection, module, params['UserName']) + if tags_to_remove: + connection.untag_user(UserName=user_name, TagKeys=tags_to_remove) + if tags_to_add: + connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) - # If user is None, create it - new_login_profile = False - if user is None: - # Check mode means we would create the user - if module.check_mode: - module.exit_json(changed=True) + return True - try: - connection.create_user(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user") - # Wait for user to be fully available before continuing - if module.params.get('wait'): - wait_iam_exists(connection, module) +@IAMErrorHandler.deletion_error_handler("remove permissions boundary for user") +def _delete_user_permissions_boundary(connection, check_mode, user_name): + if check_mode: + return True + connection.delete_user_permissions_boundary(aws_retry=True, UserName=user_name) + + +@IAMErrorHandler.common_error_handler("set permissions boundary for user") +def _put_user_permissions_boundary(connection, check_mode, user_name, boundary): + if check_mode: + return True + connection.put_user_permissions_boundary(aws_retry=True, UserName=user_name, PermissionsBoundary=boundary) + + +def ensure_permissions_boundary(connection, check_mode, user, user_name, boundary): + if boundary is None: + return False + + current_boundary = user.get("permissions_boundary", "") if user else None + + if current_boundary: + current_boundary = 
current_boundary.get("permissions_boundary_arn") + + if boundary == current_boundary: + return False - if module.params.get('password') is not None: - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + if check_mode: + return True - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True + if boundary == "": + _delete_user_permissions_boundary(connection, check_mode, user_name) else: - login_profile_result = None - update_result = update_user_tags(connection, module, params, user) + _put_user_permissions_boundary(connection, check_mode, user_name, boundary) - if module.params['update_password'] == "always" and module.params.get('password') is not None: - # Can't compare passwords, so just return changed on check mode runs - if module.check_mode: - module.exit_json(changed=True) - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + return True - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True - elif module.params.get('remove_password'): - login_profile_result = delete_login_profile(connection, module) +@IAMErrorHandler.common_error_handler("set path for user") +def ensure_path(connection, check_mode, user, user_name, path): + if path is None: + return False - changed = bool(update_result) or bool(login_profile_result) + current_path = user.get("path", "") if user else None + + if path == current_path: + return False + + if check_mode: + return True + + connection.update_user(aws_retry=True, UserName=user_name, NewPath=path) + + return True - # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) - if not compare_attached_policies(current_attached_policies, managed_policies): - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) - - # If managed_policies has a single empty element we want to remove all attached policies - if purge_policies: - # Detach policies not present - for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): - changed = True - if not module.check_mode: - try: - connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format( - policy_arn, params['UserName'])) - - # If there are policies to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(managed_policies).difference(set(current_attached_policies_arn_list)): - changed = True - # If there are policies in managed_policies attach each policy - if managed_policies != [None] and not module.check_mode: - for policy_arn in managed_policies: - try: - connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( - policy_arn, params['UserName'])) + +def create_or_update_user(connection, module): + user_name = module.params.get("name") + + changed = False + new_user = False + user = get_iam_user(connection, user_name) + + boundary = module.params.get("boundary") + if boundary: + boundary = 
convert_managed_policy_names_to_arns(connection, [module.params.get("boundary")])[0] + + if user is None: + user = create_user( + connection, + module, + user_name, + module.params.get("path"), + boundary, + module.params.get("tags"), + ) + changed = True + # Wait for user to be fully available before continuing + wait_iam_exists(connection, module) + new_user = True + + profile_changed, login_profile = ensure_login_profile( + connection, + module.check_mode, + user_name, + module.params.get("password"), + module.params.get("update_password"), + module.params.get("password_reset_required"), + new_user, + ) + changed |= profile_changed + + changed |= remove_login_profile( + connection, + module.check_mode, + user_name, + module.params.get("remove_password"), + new_user, + ) + + changed |= ensure_permissions_boundary( + connection, + module.check_mode, + user, + user_name, + boundary, + ) + + changed |= ensure_path( + connection, + module.check_mode, + user, + user_name, + module.params.get("path"), + ) + + changed |= ensure_managed_policies( + connection, + module.check_mode, + user_name, + module.params.get("managed_policies"), + module.params.get("purge_policies"), + ) + + changed |= ensure_user_tags( + connection, + module.check_mode, + user, + user_name, + module.params.get("tags"), + module.params.get("purge_tags"), + ) if module.check_mode: module.exit_json(changed=changed) # Get the user again - user = get_user(connection, module, params['UserName']) - if changed and new_login_profile: + user = get_iam_user(connection, user_name) + + if changed and login_profile: # `LoginProfile` is only returned on `create_login_profile` method - user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) + user["password_reset_required"] = login_profile.get("LoginProfile", {}).get("PasswordResetRequired", False) + + try: + # (camel_dict_to_snake_dict doesn't handle lists, so do this as a merge of two dictionaries) + policies = {"attached_policies": _list_attached_policies(connection, user_name)} + user.update(camel_dict_to_snake_dict(policies)) + except AnsibleIAMError as e: + module.warn( + f"Failed to list attached policies - {str(e.exception)}", + ) + pass - module.exit_json(changed=changed, iam_user=user, user=user['user']) + module.exit_json(changed=changed, iam_user={"user": user}, user=user) -def destroy_user(connection, module): +@IAMErrorHandler.deletion_error_handler("delete access key") +def delete_access_key(connection, check_mode, user_name, key_id): + if check_mode: + return True + connection.delete_access_key(aws_retry=True, UserName=user_name, AccessKeyId=key_id) + return True - user_name = module.params.get('name') - user = get_user(connection, module, user_name) - # User is not present - if not user: - module.exit_json(changed=False) +@IAMErrorHandler.list_error_handler("list access keys") +def delete_access_keys(connection, check_mode, user_name): + access_keys = connection.list_access_keys(aws_retry=True, UserName=user_name)["AccessKeyMetadata"] + if not access_keys: + return False + for access_key in access_keys: + delete_access_key(connection, check_mode, user_name, access_key["AccessKeyId"]) + return True - # Check mode means we would remove this user - if module.check_mode: - module.exit_json(changed=True) - # Remove any attached policies otherwise deletion fails - try: - for policy in get_attached_policy_list(connection, module, user_name): - connection.detach_user_policy(UserName=user_name, 
PolicyArn=policy['PolicyArn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) +@IAMErrorHandler.deletion_error_handler("delete SSH key") +def delete_ssh_key(connection, check_mode, user_name, key_id): + if check_mode: + return True + connection.delete_ssh_public_key(aws_retry=True, UserName=user_name, SSHPublicKeyId=key_id) + return True - try: - # Remove user's access keys - access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"] - for access_key in access_keys: - connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) - - # Remove user's login profile (console password) - delete_login_profile(connection, module) - - # Remove user's ssh public keys - ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] - for ssh_public_key in ssh_public_keys: - connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"]) - - # Remove user's service specific credentials - service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"] - for service_specific_credential in service_credentials: - connection.delete_service_specific_credential( - UserName=user_name, - ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"] - ) - - # Remove user's signing certificates - signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"] - for signing_certificate in signing_certificates: - connection.delete_signing_certificate( - UserName=user_name, - CertificateId=signing_certificate["CertificateId"] - ) - - # Remove user's MFA devices - mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"] - for mfa_device in mfa_devices: - connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"]) - - # Remove user's inline policies - inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"] - for policy_name in inline_policies: - connection.delete_user_policy(UserName=user_name, PolicyName=policy_name) - - # Remove user's group membership - user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"] - for group in user_groups: - connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) - - connection.delete_user(UserName=user_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) - - module.exit_json(changed=True) - - -def get_user(connection, module, name): - - params = dict() - params['UserName'] = name - try: - user = connection.get_user(**params) - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) +@IAMErrorHandler.list_error_handler("list SSH keys") +def delete_ssh_public_keys(connection, check_mode, user_name): + public_keys = connection.list_ssh_public_keys(aws_retry=True, UserName=user_name)["SSHPublicKeys"] + if not public_keys: + return False + for public_key in public_keys: + delete_ssh_key(connection, check_mode, user_name, public_key["SSHPublicKeyId"]) + return True - tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', [])) - 
user = camel_dict_to_snake_dict(user)
-    user['user']['tags'] = tags
-    return user
+@IAMErrorHandler.deletion_error_handler("delete service credential")
+def delete_service_credential(connection, check_mode, user_name, cred_id):
+    if check_mode:
+        return True
+    connection.delete_service_specific_credential(aws_retry=True, UserName=user_name, ServiceSpecificCredentialId=cred_id)
+    return True


-def get_attached_policy_list(connection, module, name):
-    try:
-        return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
-    except is_boto3_error_code('NoSuchEntity'):
-        return None
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
-
-
-def user_has_login_profile(connection, module, name):
-    '''
-    Returns whether or not given user has a login profile.
-    Parameters:
-        connection: IAM client
-        module: AWSModule
-        name (str): Username of user
-    Returns:
-        (bool): True if user had login profile, False if not
-    '''
-    try:
-        connection.get_login_profile(UserName=name)
-    except is_boto3_error_code('NoSuchEntity'):
+@IAMErrorHandler.list_error_handler("list service credentials")
+def delete_service_credentials(connection, check_mode, user_name):
+    credentials = connection.list_service_specific_credentials(aws_retry=True, UserName=user_name)[
+        "ServiceSpecificCredentials"
+    ]
+    if not credentials:
         return False
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name))
+    for credential in credentials:
+        delete_service_credential(connection, check_mode, user_name, credential["ServiceSpecificCredentialId"])
     return True


-def update_user_tags(connection, module, params, user):
-    user_name = params['UserName']
-    existing_tags = user['user']['tags']
-    new_tags = params.get('Tags')
-    if new_tags is None:
+@IAMErrorHandler.deletion_error_handler("delete signing certificate")
+def delete_signing_certificate(connection, check_mode, user_name, cert_id):
+    if check_mode:
+        return True
+    connection.delete_signing_certificate(aws_retry=True, UserName=user_name, CertificateId=cert_id)
+    return True
+
+
+@IAMErrorHandler.list_error_handler("list signing certificates")
+def delete_signing_certificates(connection, check_mode, user_name):
+    certificates = connection.list_signing_certificates(aws_retry=True, UserName=user_name)["Certificates"]
+    if not certificates:
         return False
-    new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+    for certificate in certificates:
+        delete_signing_certificate(connection, check_mode, user_name, certificate["CertificateId"])
+    return True

-    purge_tags = module.params.get('purge_tags')

-    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+@IAMErrorHandler.deletion_error_handler("delete MFA device")
+def delete_mfa_device(connection, check_mode, user_name, device_id):
+    if check_mode:
+        return True
+    connection.deactivate_mfa_device(aws_retry=True, UserName=user_name, SerialNumber=device_id)
+    return True

-    if not module.check_mode:
-        try:
-            if tags_to_remove:
-                connection.untag_user(UserName=user_name, TagKeys=tags_to_remove)
-            if tags_to_add:
-                connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
-        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, msg='Unable to set tags 
for user %s' % user_name)

-    changed = bool(tags_to_add) or bool(tags_to_remove)
-    return changed
+@IAMErrorHandler.list_error_handler("list MFA devices")
+def delete_mfa_devices(connection, check_mode, user_name):
+    devices = connection.list_mfa_devices(aws_retry=True, UserName=user_name)["MFADevices"]
+    if not devices:
+        return False
+    for device in devices:
+        delete_mfa_device(connection, check_mode, user_name, device["SerialNumber"])
+    return True


-def main():
+def detach_all_policies(connection, check_mode, user_name):
+    # Remove any attached policies
+    attached_policies_desc = _list_attached_policies(connection, user_name)
+    current_attached_policies = [policy["PolicyArn"] for policy in attached_policies_desc]
+    detach_policies(connection, check_mode, user_name, current_attached_policies)
+
+
+@IAMErrorHandler.deletion_error_handler("delete inline policy")
+def delete_inline_policy(connection, check_mode, user_name, policy):
+    if check_mode:
+        return True
+    connection.delete_user_policy(aws_retry=True, UserName=user_name, PolicyName=policy)
+    return True
+
+
+@IAMErrorHandler.list_error_handler("list inline policies")
+def delete_inline_policies(connection, check_mode, user_name):
+    inline_policies = connection.list_user_policies(aws_retry=True, UserName=user_name)["PolicyNames"]
+    if not inline_policies:
+        return False
+    for policy_name in inline_policies:
+        delete_inline_policy(connection, check_mode, user_name, policy_name)
+    return True
+
+
+@IAMErrorHandler.deletion_error_handler("remove user from group")
+def remove_from_group(connection, check_mode, user_name, group_name):
+    if check_mode:
+        return True
+    connection.remove_user_from_group(aws_retry=True, UserName=user_name, GroupName=group_name)
+    return True
+
+
+@IAMErrorHandler.list_error_handler("list groups containing user")
+def remove_from_all_groups(connection, check_mode, user_name):
+    user_groups = connection.list_groups_for_user(aws_retry=True, UserName=user_name)["Groups"]
+    if not user_groups:
+        return False
+    for group in user_groups:
+        remove_from_group(connection, check_mode, user_name, group["GroupName"])
+    return True
+
+
+@IAMErrorHandler.deletion_error_handler("delete user")
+def delete_user(connection, check_mode, user_name):
+    if check_mode:
+        return True
+    connection.delete_user(aws_retry=True, UserName=user_name)
+    return True
+
+
+def destroy_user(connection, module):
+    user_name = module.params.get("name")
+
+    user = get_iam_user(connection, user_name)
+    # User is not present
+    if not user:
+        module.exit_json(changed=False)

+    # Check mode means we would remove this user
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    # Prior to removing the user we need to remove all of the related resources, or deletion will
+    # fail.
+    # Because policies (direct and indirect) can contain Deny rules, order is important here in case
+    # we fail during deletion: lock out the user first *then* start removing policies...
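+    # (Each helper below is check_mode-aware and simply returns False when there is
+    # nothing to remove, so a partially-failed deletion can safely be re-run.)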
+    #  - Prevent the user from creating new sessions
+    #    - Login profile
+    #    - Access keys
+    #    - SSH keys
+    #    - Service Credentials
+    #    - Certificates
+    #    - MFA Token (last so we don't end up in a state where it's still possible to use password/keys)
+    #  - Remove policies and group membership
+    #    - Managed policies
+    #    - Inline policies
+    #    - Group membership
+
+    remove_login_profile(connection, module.check_mode, user_name, True, False)
+    delete_access_keys(connection, module.check_mode, user_name)
+    delete_ssh_public_keys(connection, module.check_mode, user_name)
+    delete_service_credentials(connection, module.check_mode, user_name)
+    delete_signing_certificates(connection, module.check_mode, user_name)
+    delete_mfa_devices(connection, module.check_mode, user_name)
+    detach_all_policies(connection, module.check_mode, user_name)
+    delete_inline_policies(connection, module.check_mode, user_name)
+    remove_from_all_groups(connection, module.check_mode, user_name)
+    changed = delete_user(connection, module.check_mode, user_name)
+    module.exit_json(changed=changed)
+
+
+def main():
     argument_spec = dict(
-        name=dict(required=True, type='str'),
-        password=dict(type='str', no_log=True),
-        password_reset_required=dict(type='bool', default=False, no_log=False),
-        update_password=dict(default='always', choices=['always', 'on_create'], no_log=False),
-        remove_password=dict(type='bool', no_log=False),
-        managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
-        state=dict(choices=['present', 'absent'], required=True),
-        purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
-        wait=dict(type='bool', default=True),
-        wait_timeout=dict(default=120, type='int'),
+        name=dict(required=True, type="str", aliases=["user_name"]),
+        path=dict(type="str", aliases=["prefix", "path_prefix"]),
+        boundary=dict(type="str", aliases=["boundary_policy_arn", "permissions_boundary"]),
+        password=dict(type="str", no_log=True),
+        password_reset_required=dict(type="bool", default=False, no_log=False),
+        update_password=dict(default="always", choices=["always", "on_create"], no_log=False),
+        remove_password=dict(type="bool", no_log=False),
+        managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"),
+        state=dict(choices=["present", "absent"], required=True),
+        purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
+        wait=dict(type="bool", default=True),
+        wait_timeout=dict(default=120, type="int"),
     )

     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        mutually_exclusive=[['password', 'remove_password']],
+        mutually_exclusive=[["password", "remove_password"]],
+    )
+
+    module.deprecate(
+        "The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.",
+        date="2024-05-01",
+        collection_name="amazon.aws",
     )

-    module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. 
Both values are returned for now.",
-                     date='2024-05-01', collection_name='amazon.aws')
+    identifier_problem = validate_iam_identifiers(
+        "user", name=module.params.get("name"), path=module.params.get("path")
+    )
+    if identifier_problem:
+        module.fail_json(msg=identifier_problem)

-    connection = module.client('iam')
+    retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["EntityTemporarilyUnmodifiable"])
+    connection = module.client("iam", retry_decorator=retry_decorator)

     state = module.params.get("state")

-    if state == 'present':
-        create_or_update_user(connection, module)
-    else:
-        destroy_user(connection, module)
+    try:
+        if state == "present":
+            create_or_update_user(connection, module)
+        else:
+            destroy_user(connection, module)
+    except AnsibleIAMError as e:
+        module.fail_json_aws_error(e)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
index e9c95edca..259d26803 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
@@ -1,14 +1,10 @@
 #!/usr/bin/python
-
 # -*- coding: utf-8 -*-
+
 # Copyright: (c) 2018, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: iam_user_info
 version_added: 5.0.0
@@ -23,28 +19,32 @@ options:
   name:
     description:
       - The name of the IAM user to look for.
+      - C(user_name) was added as an alias in release 7.2.0.
     required: false
     type: str
+    aliases: ["user_name"]
   group:
     description:
       - The group name of the IAM user to look for. Mutually exclusive with C(path).
+      - C(group_name) was added as an alias in release 7.2.0.
     required: false
     type: str
-  path:
+    aliases: ["group_name"]
+  path_prefix:
     description:
       - The path to the IAM user. Mutually exclusive with C(group).
       - If specified, all users whose path starts with the provided value are returned.
     required: false
     default: '/'
     type: str
+    aliases: ["path", "prefix"]
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
+"""

-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.

 # Gather facts about "test" user.
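+# Note: from release 7.2.0 onwards, "user_name" can be used as an alias for "name".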
 - name: Get IAM user info
@@ -60,9 +60,9 @@ EXAMPLES = r'''
 - name: Get IAM user info
   amazon.aws.iam_user_info:
     path: "/division_abc/subdivision_xyz/"
-'''
+"""

-RETURN = r'''
+RETURN = r"""
 iam_users:
     description: list of matching IAM users
     returned: success
@@ -103,97 +103,63 @@ iam_users:
             type: dict
             returned: if user exists
             sample: '{"Env": "Prod"}'
-'''
-
-try:
-    from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
-    pass  # caught by AnsibleAWSModule
+"""

-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_group
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_user
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_users
+from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_user
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict

+def _list_users(connection, name, group, path):
+    # name but not path or group
+    if name and not (path or group):
+        return [get_iam_user(connection, name)]

-@AWSRetry.exponential_backoff()
-def list_iam_users_with_backoff(client, operation, **kwargs):
-    paginator = client.get_paginator(operation)
-    return paginator.paginate(**kwargs).build_full_result()
-
-
-def describe_iam_user(user):
-    tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', []))
-    user = camel_dict_to_snake_dict(user)
-    user['tags'] = tags
-    return user
-
+    if group:
+        iam_users = get_iam_group(connection, group)["Users"]
+    else:
+        iam_users = list_iam_users(connection, path=path)

-def list_iam_users(connection, module):
+    if not iam_users:
+        return []

-    name = module.params.get('name')
-    group = module.params.get('group')
-    path = module.params.get('path')
+    # filter by name when a path or group was specified
+    if name:
+        iam_users = [u for u in iam_users if u["UserName"] == name]

-    params = dict()
-    iam_users = []
+    return iam_users

-    if not group and not path:
-        if name:
-            params['UserName'] = name
-        try:
-            iam_users.append(connection.get_user(**params)['User'])
-        except is_boto3_error_code('NoSuchEntity'):
-            pass
-        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
-            module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)

-    if group:
-        params['GroupName'] = group
-        try:
-            iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
-        except is_boto3_error_code('NoSuchEntity'):
-            pass
-        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
-            module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
-        if name:
-            iam_users = [user for user in iam_users if user['UserName'] == name]
-
-    if path and not group:
-        params['PathPrefix'] = path
-        try:
-            iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
-        except is_boto3_error_code('NoSuchEntity'):
-            pass
-        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
-            module.fail_json_aws(e, 
msg="Couldn't get IAM user info for path %s" % path) - if name: - iam_users = [user for user in iam_users if user['UserName'] == name] - - module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users]) +def list_users(connection, name, group, path): + users = _list_users(connection, name, group, path) + users = [u for u in users if u is not None] + return [normalize_iam_user(user) for user in users] def main(): argument_spec = dict( - name=dict(), - group=dict(), - path=dict(default='/') + name=dict(aliases=["user_name"]), + group=dict(aliases=["group_name"]), + path_prefix=dict(aliases=["path", "prefix"], default="/"), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['group', 'path'] - ], - supports_check_mode=True + argument_spec=argument_spec, mutually_exclusive=[["group", "path_prefix"]], supports_check_mode=True ) - connection = module.client('iam') + name = module.params.get("name") + group = module.params.get("group") + path = module.params.get("path_prefix") - list_iam_users(connection, module) + connection = module.client("iam") + try: + module.exit_json(changed=False, iam_users=list_users(connection, name, group, path)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py index 0cbaa9b05..82f73b370 100644 --- a/ansible_collections/amazon/aws/plugins/modules/kms_key.py +++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py @@ -1,12 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -* -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: kms_key version_added: 5.0.0 @@ -148,8 +146,8 @@ author: - Will Thames (@willthames) - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -158,9 +156,9 @@ notes: This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata shortly after modifying keys. For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create a new KMS key - amazon.aws.kms_key: alias: mykey @@ -211,9 +209,9 @@ EXAMPLES = r''' alias: my-kms-key policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" state: present -''' +""" -RETURN = r''' +RETURN = r""" key_id: description: ID of key. type: str @@ -435,16 +433,14 @@ multi_region: version_added: 5.5.0 returned: always sample: False - - -''' +""" # these mappings are used to go from simple labels to the actual 'Sid' values returned # by get_policy. They seem to be magic values. 
statement_label = { - 'role': 'Allow use of the key', - 'role grant': 'Allow attachment of persistent resources', - 'admin': 'Allow access for Key Administrators' + "role": "Allow use of the key", + "role grant": "Allow attachment of persistent resources", + "admin": "Allow access for Key Administrators", } import json @@ -454,44 +450,45 @@ try: except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_iam_roles_with_backoff(connection): - paginator = connection.get_paginator('list_roles') + paginator = connection.get_paginator("list_roles") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') + paginator = connection.get_paginator("list_keys") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') + paginator = connection.get_paginator("list_aliases") return paginator.paginate().build_full_result() def get_kms_aliases_lookup(connection): _aliases = dict() - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + for alias in get_kms_aliases_with_backoff(connection)["Aliases"]: # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: + if "TargetKeyId" in alias: # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + if alias["TargetKeyId"] in _aliases: + _aliases[alias["TargetKeyId"]].append(alias["AliasName"][6:]) else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + _aliases[alias["TargetKeyId"]] = [alias["AliasName"][6:]] return _aliases @@ -503,7 +500,7 @@ def get_kms_tags_with_backoff(connection, key_id, **kwargs): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id): params = 
dict(KeyId=key_id) - paginator = connection.get_paginator('list_grants') + paginator = connection.get_paginator("list_grants") return paginator.paginate(**params).build_full_result() @@ -514,7 +511,7 @@ def get_kms_metadata_with_backoff(connection, key_id): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') + paginator = connection.get_paginator("list_key_policies") return paginator.paginate(KeyId=key_id).build_full_result() @@ -532,13 +529,16 @@ def get_kms_tags(connection, module, key_id): while more: try: tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): + tags.extend(tag_response["Tags"]) + except is_boto3_error_code("AccessDeniedException"): tag_response = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] + if tag_response.get("NextMarker"): + kwargs["Marker"] = tag_response["NextMarker"] else: more = False return tags @@ -546,34 +546,34 @@ def get_kms_tags(connection, module, key_id): def get_kms_policies(connection, module, key_id): try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [ - get_key_policy_with_backoff(connection, key_id, policy)['Policy'] - for policy in policies - ] - except is_boto3_error_code('AccessDeniedException'): + policies = list_key_policies_with_backoff(connection, key_id)["PolicyNames"] + return [get_key_policy_with_backoff(connection, key_id, policy)["Policy"] for policy in policies] + except is_boto3_error_code("AccessDeniedException"): return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key policies") def camel_to_snake_grant(grant): - '''camel_to_snake_grant snakifies everything except the encryption context ''' - constraints = grant.get('Constraints', {}) + """camel_to_snake_grant snakifies everything except the encryption context""" + constraints = grant.get("Constraints", {}) result = camel_dict_to_snake_dict(grant) - if 'EncryptionContextEquals' in constraints: - result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals'] - if 'EncryptionContextSubset' in constraints: - result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset'] + if "EncryptionContextEquals" in constraints: + result["constraints"]["encryption_context_equals"] = constraints["EncryptionContextEquals"] + if "EncryptionContextSubset" in constraints: + result["constraints"]["encryption_context_subset"] = constraints["EncryptionContextSubset"] return result def get_key_details(connection, module, key_id): try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + result = get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 
msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') + result["KeyArn"] = result.pop("Arn") try: aliases = get_kms_aliases_lookup(connection) @@ -582,71 +582,68 @@ def get_key_details(connection, module, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: - result['enable_key_rotation'] = None - result['aliases'] = aliases.get(result['KeyId'], []) + result["enable_key_rotation"] = current_rotation_status.get("KeyRotationEnabled") + except is_boto3_error_code(["AccessDeniedException", "UnsupportedOperationException"]) as e: + result["enable_key_rotation"] = None + result["aliases"] = aliases.get(result["KeyId"], []) result = camel_dict_to_snake_dict(result) # grants and tags get snakified differently try: - result['grants'] = [ - camel_to_snake_grant(grant) - for grant in get_kms_grants_with_backoff(connection, key_id)['Grants'] + result["grants"] = [ + camel_to_snake_grant(grant) for grant in get_kms_grants_with_backoff(connection, key_id)["Grants"] ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain key grants") tags = get_kms_tags(connection, module, key_id) - result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] + result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue") + result["policies"] = get_kms_policies(connection, module, key_id) + result["key_policies"] = [json.loads(policy) for policy in result["policies"]] return result def get_kms_facts(connection, module): try: - keys = get_kms_keys_with_backoff(connection)['Keys'] + keys = get_kms_keys_with_backoff(connection)["Keys"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain keys") - return [get_key_details(connection, module, key['KeyId']) for key in keys] + return [get_key_details(connection, module, key["KeyId"]) for key in keys] def convert_grant_params(grant, key): - grant_params = dict( - KeyId=key['key_arn'], GranteePrincipal=grant['grantee_principal'] - ) - if grant.get('operations'): - grant_params['Operations'] = grant['operations'] - if grant.get('retiring_principal'): - grant_params['RetiringPrincipal'] = grant['retiring_principal'] - if grant.get('name'): - grant_params['Name'] = grant['name'] - if grant.get('constraints'): - grant_params['Constraints'] = dict() - if grant['constraints'].get('encryption_context_subset'): - grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset'] - if grant['constraints'].get('encryption_context_equals'): - grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals'] + grant_params = dict(KeyId=key["key_arn"], GranteePrincipal=grant["grantee_principal"]) + if grant.get("operations"): + grant_params["Operations"] = grant["operations"] + if grant.get("retiring_principal"): + grant_params["RetiringPrincipal"] = grant["retiring_principal"] + if grant.get("name"): + grant_params["Name"] = grant["name"] + if grant.get("constraints"): + grant_params["Constraints"] = dict() + if 
grant["constraints"].get("encryption_context_subset"): + grant_params["Constraints"]["EncryptionContextSubset"] = grant["constraints"]["encryption_context_subset"] + if grant["constraints"].get("encryption_context_equals"): + grant_params["Constraints"]["EncryptionContextEquals"] = grant["constraints"]["encryption_context_equals"] return grant_params def different_grant(existing_grant, desired_grant): - if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'): + if existing_grant.get("grantee_principal") != desired_grant.get("grantee_principal"): return True - if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'): + if existing_grant.get("retiring_principal") != desired_grant.get("retiring_principal"): return True - if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations')): + if set(existing_grant.get("operations", [])) != set(desired_grant.get("operations")): return True - if existing_grant.get('constraints') != desired_grant.get('constraints'): + if existing_grant.get("constraints") != desired_grant.get("constraints"): return True return False def compare_grants(existing_grants, desired_grants, purge_grants=False): - existing_dict = dict((eg['name'], eg) for eg in existing_grants) - desired_dict = dict((dg['name'], dg) for dg in desired_grants) + existing_dict = dict((eg["name"], eg) for eg in existing_grants) + desired_dict = dict((dg["name"], dg) for dg in desired_grants) to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys()) if purge_grants: to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys()) @@ -670,15 +667,15 @@ def compare_grants(existing_grants, desired_grants, purge_grants=False): def start_key_deletion(connection, module, key_metadata): - if key_metadata['KeyState'] == 'PendingDeletion': + if key_metadata["KeyState"] == "PendingDeletion": return False if module.check_mode: return True - deletion_params = {'KeyId': key_metadata['Arn']} - if module.params.get('pending_window'): - deletion_params['PendingWindowInDays'] = module.params.get('pending_window') + deletion_params = {"KeyId": key_metadata["Arn"]} + if module.params.get("pending_window"): + deletion_params["PendingWindowInDays"] = module.params.get("pending_window") try: connection.schedule_key_deletion(**deletion_params) @@ -688,8 +685,8 @@ def start_key_deletion(connection, module, key_metadata): def cancel_key_deletion(connection, module, key): - key_id = key['key_arn'] - if key['key_state'] != 'PendingDeletion': + key_id = key["key_arn"] + if key["key_state"] != "PendingDeletion": return False if module.check_mode: @@ -699,7 +696,7 @@ def cancel_key_deletion(connection, module, key): connection.cancel_key_deletion(KeyId=key_id) # key is disabled after deletion cancellation # set this so that ensure_enabled_disabled works correctly - key['key_state'] = 'Disabled' + key["key_state"] = "Disabled" except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to cancel key deletion") @@ -707,14 +704,14 @@ def cancel_key_deletion(connection, module, key): def ensure_enabled_disabled(connection, module, key, enabled): - desired_state = 'Enabled' + desired_state = "Enabled" if not enabled: - desired_state = 'Disabled' + desired_state = "Disabled" - if key['key_state'] == desired_state: + if key["key_state"] == desired_state: return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: if enabled: try: @@ -736,10 +733,10 @@ def 
update_alias(connection, module, key, alias): if alias is None: return False - key_id = key['key_arn'] - aliases = get_kms_aliases_with_backoff(connection)['Aliases'] + key_id = key["key_arn"] + aliases = get_kms_aliases_with_backoff(connection)["Aliases"] # We will only add new aliases, not rename existing ones - if alias in [_alias['AliasName'] for _alias in aliases]: + if alias in [_alias["AliasName"] for _alias in aliases]: return False if not module.check_mode: @@ -754,10 +751,10 @@ def update_alias(connection, module, key, alias): def update_description(connection, module, key, description): if description is None: return False - if key['description'] == description: + if key["description"] == description: return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: try: connection.update_key_description(KeyId=key_id, Description=description) @@ -771,11 +768,11 @@ def update_tags(connection, module, key, desired_tags, purge_tags): if desired_tags is None: return False - to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags) + to_add, to_remove = compare_aws_tags(key["tags"], desired_tags, purge_tags) if not (bool(to_add) or bool(to_remove)): return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: if to_remove: try: @@ -785,9 +782,9 @@ def update_tags(connection, module, key, desired_tags, purge_tags): if to_add: try: tags = ansible_dict_to_boto3_tag_list( - module.params['tags'], - tag_name_key_name='TagKey', - tag_value_key_name='TagValue', + module.params["tags"], + tag_name_key_name="TagKey", + tag_value_key_name="TagValue", ) connection.tag_resource(KeyId=key_id, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -804,10 +801,10 @@ def update_policy(connection, module, key, policy): except ValueError as e: module.fail_json_aws(e, msg="Unable to parse new policy as JSON") - key_id = key['key_arn'] + key_id = key["key_arn"] try: - keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default') - original_policy = json.loads(keyret['Policy']) + keyret = connection.get_key_policy(KeyId=key_id, PolicyName="default") + original_policy = json.loads(keyret["Policy"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # If we can't fetch the current policy assume we're making a change # Could occur if we have PutKeyPolicy without GetKeyPolicy @@ -818,7 +815,7 @@ def update_policy(connection, module, key, policy): if not module.check_mode: try: - connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy) + connection.put_key_policy(KeyId=key_id, PolicyName="default", Policy=policy) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update key policy") @@ -828,15 +825,18 @@ def update_policy(connection, module, key, policy): def update_key_rotation(connection, module, key, enable_key_rotation): if enable_key_rotation is None: return False - key_id = key['key_arn'] + key_id = key["key_arn"] try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: + if current_rotation_status.get("KeyRotationEnabled") == enable_key_rotation: return False - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: 
disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get current key rotation status") if not module.check_mode: @@ -852,17 +852,17 @@ def update_key_rotation(connection, module, key, enable_key_rotation): def update_grants(connection, module, key, desired_grants, purge_grants): - existing_grants = key['grants'] + existing_grants = key["grants"] to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants) if not (bool(to_add) or bool(to_remove)): return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: for grant in to_remove: try: - connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id']) + connection.retire_grant(KeyId=key_id, GrantId=grant["grant_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to retire grant") for grant in to_add: @@ -879,61 +879,61 @@ def update_key(connection, module, key): changed = False changed |= cancel_key_deletion(connection, module, key) - changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled']) - changed |= update_alias(connection, module, key, module.params['alias']) - changed |= update_description(connection, module, key, module.params['description']) - changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags')) - changed |= update_policy(connection, module, key, module.params.get('policy')) - changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants')) - changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + changed |= ensure_enabled_disabled(connection, module, key, module.params["enabled"]) + changed |= update_alias(connection, module, key, module.params["alias"]) + changed |= update_description(connection, module, key, module.params["description"]) + changed |= update_tags(connection, module, key, module.params["tags"], module.params.get("purge_tags")) + changed |= update_policy(connection, module, key, module.params.get("policy")) + changed |= update_grants(connection, module, key, module.params.get("grants"), module.params.get("purge_grants")) + changed |= update_key_rotation(connection, module, key, module.params.get("enable_key_rotation")) # make results consistent with kms_facts before returning - result = get_key_details(connection, module, key['key_arn']) - result['changed'] = changed + result = get_key_details(connection, module, key["key_arn"]) + result["changed"] = changed return result def create_key(connection, module): - key_usage = module.params.get('key_usage') - key_spec = module.params.get('key_spec') - multi_region = module.params.get('multi_region') + key_usage = module.params.get("key_usage") + key_spec = module.params.get("key_spec") + multi_region = module.params.get("multi_region") tags_list = ansible_dict_to_boto3_tag_list( - module.params['tags'] or {}, + module.params["tags"] or {}, # KMS doesn't use 'Key' and 'Value' as other APIs do. 
- tag_name_key_name='TagKey', - tag_value_key_name='TagValue', + tag_name_key_name="TagKey", + tag_value_key_name="TagValue", ) params = dict( BypassPolicyLockoutSafetyCheck=False, Tags=tags_list, KeyUsage=key_usage, CustomerMasterKeySpec=key_spec, - Origin='AWS_KMS', + Origin="AWS_KMS", MultiRegion=multi_region, ) if module.check_mode: - return {'changed': True} + return {"changed": True} - if module.params.get('description'): - params['Description'] = module.params['description'] - if module.params.get('policy'): - params['Policy'] = module.params['policy'] + if module.params.get("description"): + params["Description"] = module.params["description"] + if module.params.get("policy"): + params["Policy"] = module.params["policy"] try: - result = connection.create_key(**params)['KeyMetadata'] + result = connection.create_key(**params)["KeyMetadata"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create initial key") - key = get_key_details(connection, module, result['KeyId']) - update_alias(connection, module, key, module.params['alias']) - update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + key = get_key_details(connection, module, result["KeyId"]) + update_alias(connection, module, key, module.params["alias"]) + update_key_rotation(connection, module, key, module.params.get("enable_key_rotation")) - ensure_enabled_disabled(connection, module, key, module.params.get('enabled')) - update_grants(connection, module, key, module.params.get('grants'), False) + ensure_enabled_disabled(connection, module, key, module.params.get("enabled")) + update_grants(connection, module, key, module.params.get("grants"), False) # make results consistent with kms_facts - result = get_key_details(connection, module, key['key_id']) - result['changed'] = True + result = get_key_details(connection, module, key["key_id"]) + result["changed"] = True return result @@ -942,24 +942,24 @@ def delete_key(connection, module, key_metadata): changed |= start_key_deletion(connection, module, key_metadata) - result = get_key_details(connection, module, key_metadata['Arn']) - result['changed'] = changed + result = get_key_details(connection, module, key_metadata["Arn"]) + result["changed"] = changed return result def get_arn_from_role_name(iam, rolename): ret = iam.get_role(RoleName=rolename) - if ret.get('Role') and ret['Role'].get('Arn'): - return ret['Role']['Arn'] - raise Exception('could not find arn for name {0}.'.format(rolename)) + if ret.get("Role") and ret["Role"].get("Arn"): + return ret["Role"]["Arn"] + raise Exception(f"could not find arn for name {rolename}.") def canonicalize_alias_name(alias): if alias is None: return None - if alias.startswith('alias/'): + if alias.startswith("alias/"): return alias - return 'alias/' + alias + return "alias/" + alias def fetch_key_metadata(connection, module, key_id, alias): @@ -969,14 +969,14 @@ def fetch_key_metadata(connection, module, key_id, alias): # Integration tests will wait for 10 seconds to combat this issue. # See https://github.com/ansible-collections/community.aws/pull/1052. 
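(Editor's aside: canonicalize_alias_name(), shown in full just above and used on the next changed line, only normalises the "alias/" prefix. A self-contained sketch of its contract:)

    def canonicalize_alias_name(alias):
        # None passes through; an existing "alias/" prefix is preserved;
        # any other name gets the prefix added, as the KMS APIs expect.
        if alias is None:
            return None
        if alias.startswith("alias/"):
            return alias
        return "alias/" + alias

    assert canonicalize_alias_name("my-key") == "alias/my-key"
    assert canonicalize_alias_name("alias/my-key") == "alias/my-key"
    assert canonicalize_alias_name(None) is None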
- alias = canonicalize_alias_name(module.params.get('alias')) + alias = canonicalize_alias_name(module.params.get("alias")) try: # Fetch by key_id where possible if key_id: - return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + return get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] # Or try alias as a backup - return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata'] + return get_kms_metadata_with_backoff(connection, alias)["KeyMetadata"] except connection.exceptions.NotFoundException: return None @@ -986,88 +986,77 @@ def fetch_key_metadata(connection, module, key_id, alias): def validate_params(module, key_metadata): # We can't create keys with a specific ID, if we can't access the key we'll have to fail - if ( - module.params.get('state') == 'present' - and module.params.get('key_id') - and not key_metadata - ): - module.fail_json( - msg='Could not find key with id {0} to update'.format( - module.params.get('key_id') - ) - ) - if ( - module.params.get('multi_region') - and key_metadata - and module.params.get('state') == 'present' - ): - module.fail_json( - msg='You cannot change the multi-region property on an existing key.' - ) + if module.params.get("state") == "present" and module.params.get("key_id") and not key_metadata: + module.fail_json(msg=f"Could not find key with id {module.params.get('key_id')} to update") + if module.params.get("multi_region") and key_metadata and module.params.get("state") == "present": + module.fail_json(msg="You cannot change the multi-region property on an existing key.") def main(): argument_spec = dict( - alias=dict(aliases=['key_alias']), - pending_window=dict(aliases=['deletion_delay'], type='int'), - key_id=dict(aliases=['key_arn']), + alias=dict(aliases=["key_alias"]), + pending_window=dict(aliases=["deletion_delay"], type="int"), + key_id=dict(aliases=["key_arn"]), description=dict(), - enabled=dict(type='bool', default=True), - multi_region=dict(type='bool', default=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - grants=dict(type='list', default=[], elements='dict'), - policy=dict(type='json'), - purge_grants=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - enable_key_rotation=(dict(type='bool')), + enabled=dict(type="bool", default=True), + multi_region=dict(type="bool", default=False), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + grants=dict(type="list", default=[], elements="dict"), + policy=dict(type="json"), + purge_grants=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + enable_key_rotation=(dict(type="bool")), key_spec=dict( - type='str', - default='SYMMETRIC_DEFAULT', - aliases=['customer_master_key_spec'], + type="str", + default="SYMMETRIC_DEFAULT", + aliases=["customer_master_key_spec"], choices=[ - 'SYMMETRIC_DEFAULT', - 'RSA_2048', - 'RSA_3072', - 'RSA_4096', - 'ECC_NIST_P256', - 'ECC_NIST_P384', - 'ECC_NIST_P521', - 'ECC_SECG_P256K1', + "SYMMETRIC_DEFAULT", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", ], ), key_usage=dict( - type='str', - default='ENCRYPT_DECRYPT', - choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY'], + type="str", + default="ENCRYPT_DECRYPT", + choices=["ENCRYPT_DECRYPT", "SIGN_VERIFY"], ), ) module = AnsibleAWSModule( supports_check_mode=True, argument_spec=argument_spec, - 
required_one_of=[['alias', 'key_id']], + required_one_of=[["alias", "key_id"]], ) - kms = module.client('kms') + kms = module.client("kms") module.deprecate( - "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', - collection_name='amazon.aws', + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), + date="2024-05-01", + collection_name="amazon.aws", ) - key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias')) + key_metadata = fetch_key_metadata(kms, module, module.params.get("key_id"), module.params.get("alias")) validate_params(module, key_metadata) - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": if key_metadata is None: module.exit_json(changed=False) result = delete_key(kms, module, key_metadata) module.exit_json(**result) if key_metadata: - key_details = get_key_details(kms, module, key_metadata['Arn']) + key_details = get_key_details(kms, module, key_metadata["Arn"]) result = update_key(kms, module, key_details) module.exit_json(**result) @@ -1075,5 +1064,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py index ba8f30a2f..4ba249940 100644 --- a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: kms_key_info version_added: 5.0.0 @@ -52,12 +50,12 @@ options: default: False type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all KMS keys @@ -72,9 +70,9 @@ EXAMPLES = r''' - amazon.aws.kms_key_info: filters: "tag:Name": Example -''' +""" -RETURN = r''' +RETURN = r""" kms_keys: description: List of keys. 
type: complex @@ -284,7 +282,7 @@ kms_keys: type: str returned: always sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz -''' +""" import json @@ -295,10 +293,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict # Caching lookup for aliases _aliases = dict() @@ -306,26 +304,26 @@ _aliases = dict() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') + paginator = connection.get_paginator("list_keys") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') + paginator = connection.get_paginator("list_aliases") return paginator.paginate().build_full_result() def get_kms_aliases_lookup(connection): if not _aliases: - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + for alias in get_kms_aliases_with_backoff(connection)["Aliases"]: # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: + if "TargetKeyId" in alias: # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + if alias["TargetKeyId"] in _aliases: + _aliases[alias["TargetKeyId"]].append(alias["AliasName"][6:]) else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + _aliases[alias["TargetKeyId"]] = [alias["AliasName"][6:]] return _aliases @@ -337,9 +335,9 @@ def get_kms_tags_with_backoff(connection, key_id, **kwargs): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id, **kwargs): params = dict(KeyId=key_id) - if kwargs.get('tokens'): - params['GrantTokens'] = kwargs['tokens'] - paginator = connection.get_paginator('list_grants') + if kwargs.get("tokens"): + params["GrantTokens"] = kwargs["tokens"] + paginator = connection.get_paginator("list_grants") return paginator.paginate(**params).build_full_result() @@ -350,7 +348,7 @@ def get_kms_metadata_with_backoff(connection, key_id): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') + paginator = connection.get_paginator("list_key_policies") return paginator.paginate(KeyId=key_id).build_full_result() @@ -363,18 +361,18 @@ def get_key_policy_with_backoff(connection, key_id, policy_name): def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: 
+ except is_boto3_error_code(["AccessDeniedException", "UnsupportedOperationException"]): return None - return current_rotation_status.get('KeyRotationEnabled') + return current_rotation_status.get("KeyRotationEnabled") def canonicalize_alias_name(alias): if alias is None: return None - if alias.startswith('alias/'): + if alias.startswith("alias/"): return alias - return 'alias/' + alias + return "alias/" + alias def get_kms_tags(connection, module, key_id): @@ -386,13 +384,13 @@ def get_kms_tags(connection, module, key_id): while more: try: tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): + tags.extend(tag_response["Tags"]) + except is_boto3_error_code("AccessDeniedException"): tag_response = {} except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] + if tag_response.get("NextMarker"): + kwargs["Marker"] = tag_response["NextMarker"] else: more = False return tags @@ -400,29 +398,28 @@ def get_kms_tags(connection, module, key_id): def get_kms_policies(connection, module, key_id): try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for - policy in policies] - except is_boto3_error_code('AccessDeniedException'): + policies = list_key_policies_with_backoff(connection, key_id)["PolicyNames"] + return [get_key_policy_with_backoff(connection, key_id, policy)["Policy"] for policy in policies] + except is_boto3_error_code("AccessDeniedException"): return [] except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key policies") def key_matches_filter(key, filtr): - if filtr[0] == 'key-id': - return filtr[1] == key['key_id'] - if filtr[0] == 'tag-key': - return filtr[1] in key['tags'] - if filtr[0] == 'tag-value': - return filtr[1] in key['tags'].values() - if filtr[0] == 'alias': - return filtr[1] in key['aliases'] - if filtr[0].startswith('tag:'): + if filtr[0] == "key-id": + return filtr[1] == key["key_id"] + if filtr[0] == "tag-key": + return filtr[1] in key["tags"] + if filtr[0] == "tag-value": + return filtr[1] in key["tags"].values() + if filtr[0] == "alias": + return filtr[1] in key["aliases"] + if filtr[0].startswith("tag:"): tag_key = filtr[0][4:] - if tag_key not in key['tags']: + if tag_key not in key["tags"]: return False - return key['tags'].get(tag_key) == filtr[1] + return key["tags"].get(tag_key) == filtr[1] def key_matches_filters(key, filters): @@ -436,96 +433,111 @@ def get_key_details(connection, module, key_id, tokens=None): if not tokens: tokens = [] try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + result = get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] # Make sure we have the canonical ARN, we might have been passed an alias - key_id = result['Arn'] - except is_boto3_error_code('NotFoundException'): + key_id = result["Arn"] + except is_boto3_error_code("NotFoundException"): return None - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - module.warn('Permission denied fetching key metadata ({0})'.format(key_id)) + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + module.warn(f"Permission denied 
fetching key metadata ({key_id})") return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') + result["KeyArn"] = result.pop("Arn") try: aliases = get_kms_aliases_lookup(connection) - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key aliases') + except is_boto3_error_code("AccessDeniedException"): + module.warn("Permission denied fetching key aliases") aliases = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain aliases") # We can only get aliases for our own account, so we don't need the full ARN - result['aliases'] = aliases.get(result['KeyId'], []) - result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + result["aliases"] = aliases.get(result["KeyId"], []) + result["enable_key_rotation"] = get_enable_key_rotation_with_backoff(connection, key_id) - if module.params.get('pending_deletion'): + if module.params.get("pending_deletion"): return camel_dict_to_snake_dict(result) try: - result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key grants ({0})'.format(key_id)) - result['grants'] = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + result["grants"] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)["Grants"] + except is_boto3_error_code("AccessDeniedException"): + module.warn(f"Permission denied fetching key grants ({key_id})") + result["grants"] = [] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key grants") tags = get_kms_tags(connection, module, key_id) result = camel_dict_to_snake_dict(result) - result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] + result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue") + result["policies"] = get_kms_policies(connection, module, key_id) + result["key_policies"] = [json.loads(policy) for policy in result["policies"]] return result def get_kms_info(connection, module): - if module.params.get('key_id'): - key_id = module.params.get('key_id') + if module.params.get("key_id"): + key_id = module.params.get("key_id") details = get_key_details(connection, module, key_id) if details: return [details] return [] - elif module.params.get('alias'): - alias = canonicalize_alias_name(module.params.get('alias')) + elif module.params.get("alias"): + alias = canonicalize_alias_name(module.params.get("alias")) details = get_key_details(connection, module, alias) if details: return [details] return [] else: try: - keys = get_kms_keys_with_backoff(connection)['Keys'] + keys = 
get_kms_keys_with_backoff(connection)["Keys"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain keys") - return [get_key_details(connection, module, key['KeyId']) for key in keys] + return [get_key_details(connection, module, key["KeyId"]) for key in keys] def main(): argument_spec = dict( - alias=dict(aliases=['key_alias']), - key_id=dict(aliases=['key_arn']), - filters=dict(type='dict'), - pending_deletion=dict(type='bool', default=False), + alias=dict(aliases=["key_alias"]), + key_id=dict(aliases=["key_arn"]), + filters=dict(type="dict"), + pending_deletion=dict(type="bool", default=False), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['alias', 'filters', 'key_id']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, mutually_exclusive=[["alias", "filters", "key_id"]], supports_check_mode=True + ) try: - connection = module.client('kms') + connection = module.client("kms") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', collection_name='amazon.aws') + module.fail_json_aws(e, msg="Failed to connect to AWS") + + module.deprecate( + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), + date="2024-05-01", + collection_name="amazon.aws", + ) all_keys = get_kms_info(connection, module) - filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] + filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params["filters"])] ret_params = dict(kms_keys=filtered_keys) module.exit_json(**ret_params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda.py b/ansible_collections/amazon/aws/plugins/modules/lambda.py index 2c46a7ef5..5c30b34fd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: lambda version_added: 5.0.0 @@ -115,7 +113,6 @@ options: description: - The instruction set architecture that the function supports. - Requires one of I(s3_bucket) or I(zip_file). - - Requires botocore >= 1.21.51. type: str choices: ['x86_64', 'arm64'] aliases: ['architectures'] @@ -145,16 +142,23 @@ options: type: list elements: dict version_added: 5.5.0 + image_uri: + description: + - The Amazon ECR URI of the image to use. + - Required (alternative to runtime zip_file and s3_bucket) when creating a function. + - Required when I(state=present). 
+ type: str + version_added: 7.3.0 author: - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create Lambda functions - name: looped creation amazon.aws.lambda: @@ -165,11 +169,11 @@ EXAMPLES = r''' role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' handler: 'hello_python.my_handler' vpc_subnet_ids: - - subnet-123abcde - - subnet-edcba321 + - subnet-123abcde + - subnet-edcba321 vpc_security_group_ids: - - sg-123abcde - - sg-edcba321 + - sg-123abcde + - sg-edcba321 environment_variables: '{{ item.env_vars }}' tags: key1: 'value1' @@ -215,10 +219,10 @@ EXAMPLES = r''' role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' handler: 'hello_python.my_handler' layers: - - layer_version_arn: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' -''' + - layer_version_arn: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' +""" -RETURN = r''' +RETURN = r""" code: description: The lambda function's code returned by get_function in boto3. returned: success @@ -243,7 +247,6 @@ configuration: contains: architectures: description: The architectures supported by the function. - returned: successful run where botocore >= 1.21.51 type: list elements: str sample: ['arm64'] @@ -389,61 +392,28 @@ configuration: description: The Amazon Resource Name (ARN) of a signing job. returned: always type: str -''' +""" import base64 import hashlib -import traceback import re +import traceback from collections import Counter try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # protected by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -def get_account_info(module): - """return the account information (account id and partition) we are currently working on - - get_account_info tries too find out the account that we are working - on. It's not guaranteed that this will be easy so we try in - several different ways. Giving either IAM or STS privileges to - the account should be enough to permit this. 
- """ - account_id = None - partition = None - try: - sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) - caller_id = sts_client.get_caller_identity(aws_retry=True) - account_id = caller_id.get('Account') - partition = caller_id.get('Arn').split(':')[1] - except (BotoCoreError, ClientError): - try: - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') - except is_boto3_error_code('AccessDenied') as e: - try: - except_msg = to_native(e.message) - except AttributeError: - except_msg = to_native(e) - m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg) - if m is None: - module.fail_json_aws(e, msg="getting account information") - account_id = m.group(4) - partition = m.group(1) - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="getting account information") - - return account_id, partition +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def get_current_function(connection, function_name, qualifier=None): @@ -451,43 +421,42 @@ def get_current_function(connection, function_name, qualifier=None): if qualifier is not None: return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True) return connection.get_function(FunctionName=function_name, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None def get_layer_version_arn(module, connection, layer_name, version_number): try: - layer_versions = connection.list_layer_versions(LayerName=layer_name, aws_retry=True)['LayerVersions'] + layer_versions = connection.list_layer_versions(LayerName=layer_name, aws_retry=True)["LayerVersions"] for v in layer_versions: if v["Version"] == version_number: return v["LayerVersionArn"] - module.fail_json(msg='Unable to find version {0} from Lambda layer {1}'.format(version_number, layer_name)) - except is_boto3_error_code('ResourceNotFoundException'): - module.fail_json(msg='Lambda layer {0} not found'.format(layer_name)) + module.fail_json(msg=f"Unable to find version {version_number} from Lambda layer {layer_name}") + except is_boto3_error_code("ResourceNotFoundException"): + module.fail_json(msg=f"Lambda layer {layer_name} not found") def sha256sum(filename): hasher = hashlib.sha256() - with open(filename, 'rb') as f: + with open(filename, "rb") as f: hasher.update(f.read()) code_hash = hasher.digest() code_b64 = base64.b64encode(code_hash) - hex_digest = code_b64.decode('utf-8') + hex_digest = code_b64.decode("utf-8") return hex_digest def set_tag(client, module, tags, function, purge_tags): - if tags is None: return False changed = False - arn = function['Configuration']['FunctionArn'] + arn = function["Configuration"]["FunctionArn"] try: - current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {}) + current_tags = client.list_tags(Resource=arn, aws_retry=True).get("Tags", {}) except (BotoCoreError, ClientError) as e: 
module.fail_json_aws(e, msg="Unable to list tags") @@ -504,7 +473,7 @@ def set_tag(client, module, tags, function, purge_tags): client.untag_resource( Resource=arn, TagKeys=tags_to_remove, - aws_retry=True + aws_retry=True, ) changed = True @@ -512,26 +481,26 @@ def set_tag(client, module, tags, function, purge_tags): client.tag_resource( Resource=arn, Tags=tags_to_add, - aws_retry=True + aws_retry=True, ) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) + module.fail_json_aws(e, msg=f"Unable to tag resource {arn}") return changed def wait_for_lambda(client, module, name): try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter = client.get_waiter("function_active") + client_updated_waiter = client.get_waiter("function_updated") client_active_waiter.wait(FunctionName=name) client_updated_waiter.wait(FunctionName=name) except WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') + module.fail_json_aws(e, msg="Timeout while waiting on lambda to finish updating") except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') + module.fail_json_aws(e, msg="Failed while waiting on lambda to finish updating") def format_response(response): @@ -549,13 +518,13 @@ def _zip_args(zip_file, current_config, ignore_checksum): # If there's another change that needs to happen, we always re-upload the code if not ignore_checksum: local_checksum = sha256sum(zip_file) - remote_checksum = current_config.get('CodeSha256', '') + remote_checksum = current_config.get("CodeSha256", "") if local_checksum == remote_checksum: return {} - with open(zip_file, 'rb') as f: + with open(zip_file, "rb") as f: zip_content = f.read() - return {'ZipFile': zip_content} + return {"ZipFile": zip_content} def _s3_args(s3_bucket, s3_key, s3_object_version): @@ -564,27 +533,34 @@ def _s3_args(s3_bucket, s3_key, s3_object_version): if not s3_key: return {} - code = {'S3Bucket': s3_bucket, - 'S3Key': s3_key} + code = {"S3Bucket": s3_bucket, "S3Key": s3_key} if s3_object_version: - code.update({'S3ObjectVersion': s3_object_version}) + code.update({"S3ObjectVersion": s3_object_version}) return code +def _image_args(image_uri): + if not image_uri: + return {} + + code = {"ImageUri": image_uri} + return code + + def _code_args(module, current_config): - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - architectures = module.params.get('architecture') - checksum_match = False + s3_bucket = module.params.get("s3_bucket") + s3_key = module.params.get("s3_key") + s3_object_version = module.params.get("s3_object_version") + zip_file = module.params.get("zip_file") + architectures = module.params.get("architecture") + image_uri = module.params.get("image_uri") code_kwargs = {} - if architectures and current_config.get('Architectures', None) != [architectures]: - module.warn('Arch Change') - code_kwargs.update({'Architectures': [architectures]}) + if architectures and current_config.get("Architectures", None) != [architectures]: + module.warn("Arch Change") + code_kwargs.update({"Architectures": [architectures]}) try: code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs))) @@ -592,12 +568,13 @@ def 
_code_args(module, current_config): module.fail_json(msg=str(e), exception=traceback.format_exc()) code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version)) + code_kwargs.update(_image_args(image_uri)) if not code_kwargs: return {} - if not architectures and current_config.get('Architectures', None): - code_kwargs.update({'Architectures': current_config.get('Architectures', None)}) + if not architectures and current_config.get("Architectures", None): + code_kwargs.update({"Architectures": current_config.get("Architectures", None)}) return code_kwargs @@ -605,180 +582,191 @@ def _code_args(module, current_config): def main(): argument_spec = dict( name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), + image_uri=dict(), runtime=dict(), role=dict(), handler=dict(), - zip_file=dict(aliases=['src']), + zip_file=dict(aliases=["src"]), s3_bucket=dict(), s3_key=dict(no_log=False), s3_object_version=dict(), - description=dict(default=''), - timeout=dict(type='int', default=3), - memory_size=dict(type='int', default=128), - vpc_subnet_ids=dict(type='list', elements='str'), - vpc_security_group_ids=dict(type='list', elements='str'), - environment_variables=dict(type='dict'), + description=dict(default=""), + timeout=dict(type="int", default=3), + memory_size=dict(type="int", default=128), + vpc_subnet_ids=dict(type="list", elements="str"), + vpc_security_group_ids=dict(type="list", elements="str"), + environment_variables=dict(type="dict"), dead_letter_arn=dict(), - kms_key_arn=dict(type='str', no_log=False), - tracing_mode=dict(choices=['Active', 'PassThrough']), - architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + kms_key_arn=dict(type="str", no_log=False), + tracing_mode=dict(choices=["Active", "PassThrough"]), + architecture=dict(choices=["x86_64", "arm64"], type="str", aliases=["architectures"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), layers=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - layer_version_arn=dict(type='str'), - layer_name=dict(type='str', aliases=['layer_arn']), - version=dict(type='int', aliases=['layer_version']), + layer_version_arn=dict(type="str"), + layer_name=dict(type="str", aliases=["layer_arn"]), + version=dict(type="int", aliases=["layer_version"]), ), - required_together=[['layer_name', 'version']], - required_one_of=[['layer_version_arn', 'layer_name']], - mutually_exclusive=[ - ['layer_name', 'layer_version_arn'], - ['version', 'layer_version_arn'] - ], + required_together=[["layer_name", "version"]], + required_one_of=[["layer_version_arn", "layer_name"]], + mutually_exclusive=[["layer_name", "layer_version_arn"], ["version", "layer_version_arn"]], ), ) - mutually_exclusive = [['zip_file', 's3_key'], - ['zip_file', 's3_bucket'], - ['zip_file', 's3_object_version']] + mutually_exclusive = [ + ["zip_file", "s3_key"], + ["zip_file", "s3_bucket"], + ["zip_file", "s3_object_version"], + ["image_uri", "zip_file"], + ["image_uri", "runtime"], + ["image_uri", "handler"], + ["image_uri", "s3_key"], + ["image_uri", "s3_bucket"], + ["image_uri", "s3_object_version"], + ] + + required_by = {"runtime": ["handler"]} - required_together = [['s3_key', 's3_bucket'], - ['vpc_subnet_ids', 'vpc_security_group_ids']] + required_together = [ + 
["s3_key", "s3_bucket"], + ["vpc_subnet_ids", "vpc_security_group_ids"], + ["runtime", "handler"], + ] required_if = [ - ['state', 'present', ['runtime', 'handler', 'role']], - ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True], - ['architecture', 'arm64', ['zip_file', 's3_bucket'], True], + ["state", "present", ["role"]], + ["state", "present", ["runtime", "image_uri"], True], + ["architecture", "x86_64", ["zip_file", "s3_bucket", "image_uri"], True], + ["architecture", "arm64", ["zip_file", "s3_bucket", "image_uri"], True], ] - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_together=required_together, - required_if=required_if) - - name = module.params.get('name') - state = module.params.get('state').lower() - runtime = module.params.get('runtime') - role = module.params.get('role') - handler = module.params.get('handler') - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - description = module.params.get('description') - timeout = module.params.get('timeout') - memory_size = module.params.get('memory_size') - vpc_subnet_ids = module.params.get('vpc_subnet_ids') - vpc_security_group_ids = module.params.get('vpc_security_group_ids') - environment_variables = module.params.get('environment_variables') - dead_letter_arn = module.params.get('dead_letter_arn') - tracing_mode = module.params.get('tracing_mode') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - kms_key_arn = module.params.get('kms_key_arn') - architectures = module.params.get('architecture') + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_if=required_if, + ) + + name = module.params.get("name") + state = module.params.get("state").lower() + runtime = module.params.get("runtime") + role = module.params.get("role") + handler = module.params.get("handler") + description = module.params.get("description") + timeout = module.params.get("timeout") + memory_size = module.params.get("memory_size") + vpc_subnet_ids = module.params.get("vpc_subnet_ids") + vpc_security_group_ids = module.params.get("vpc_security_group_ids") + environment_variables = module.params.get("environment_variables") + dead_letter_arn = module.params.get("dead_letter_arn") + tracing_mode = module.params.get("tracing_mode") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + kms_key_arn = module.params.get("kms_key_arn") + architectures = module.params.get("architecture") + image_uri = module.params.get("image_uri") layers = [] check_mode = module.check_mode changed = False - if architectures: - module.require_botocore_at_least( - '1.21.51', reason='to configure the architectures that the function supports.') - try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Trying to connect to AWS") - if state == 'present': - if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): + if state == "present": + if re.match(r"^arn:aws(-([a-z\-]+))?:iam", role): role_arn = role else: # get account ID and assemble ARN - account_id, partition = get_account_info(module) - role_arn = 
'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) + account_id, partition = get_aws_account_info(module) + role_arn = f"arn:{partition}:iam::{account_id}:role/{role}" # create list of layer version arn if module.params.get("layers"): for layer in module.params.get("layers"): layer_version_arn = layer.get("layer_version_arn") if layer_version_arn is None: - layer_version_arn = get_layer_version_arn(module, client, layer.get("layer_name"), layer.get("version")) + layer_version_arn = get_layer_version_arn( + module, client, layer.get("layer_name"), layer.get("version") + ) layers.append(layer_version_arn) # Get function configuration if present, False otherwise current_function = get_current_function(client, name) # Update existing Lambda function - if state == 'present' and current_function: - + if state == "present" and current_function: # Get current state - current_config = current_function['Configuration'] + current_config = current_function["Configuration"] current_version = None # Update function configuration - func_kwargs = {'FunctionName': name} + func_kwargs = {"FunctionName": name} # Update configuration if needed - if role_arn and current_config['Role'] != role_arn: - func_kwargs.update({'Role': role_arn}) - if handler and current_config['Handler'] != handler: - func_kwargs.update({'Handler': handler}) - if description and current_config['Description'] != description: - func_kwargs.update({'Description': description}) - if timeout and current_config['Timeout'] != timeout: - func_kwargs.update({'Timeout': timeout}) - if memory_size and current_config['MemorySize'] != memory_size: - func_kwargs.update({'MemorySize': memory_size}) - if runtime and current_config['Runtime'] != runtime: - func_kwargs.update({'Runtime': runtime}) - if (environment_variables is not None) and (current_config.get( - 'Environment', {}).get('Variables', {}) != environment_variables): - func_kwargs.update({'Environment': {'Variables': environment_variables}}) + if role_arn and current_config["Role"] != role_arn: + func_kwargs.update({"Role": role_arn}) + if handler and current_config["Handler"] != handler: + func_kwargs.update({"Handler": handler}) + if description and current_config["Description"] != description: + func_kwargs.update({"Description": description}) + if timeout and current_config["Timeout"] != timeout: + func_kwargs.update({"Timeout": timeout}) + if memory_size and current_config["MemorySize"] != memory_size: + func_kwargs.update({"MemorySize": memory_size}) + if image_uri is not None and current_config["PackageType"] != "Image": + func_kwargs.update({"PackageType": "Image"}) + if runtime and current_config["Runtime"] != runtime: + func_kwargs.update({"Runtime": runtime}) + if (environment_variables is not None) and ( + current_config.get("Environment", {}).get("Variables", {}) != environment_variables + ): + func_kwargs.update({"Environment": {"Variables": environment_variables}}) if dead_letter_arn is not None: - if current_config.get('DeadLetterConfig'): - if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + if current_config.get("DeadLetterConfig"): + if current_config["DeadLetterConfig"]["TargetArn"] != dead_letter_arn: + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) else: if dead_letter_arn != "": - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) - if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 
'PassThrough') != tracing_mode): - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) + if tracing_mode and (current_config.get("TracingConfig", {}).get("Mode", "PassThrough") != tracing_mode): + func_kwargs.update({"TracingConfig": {"Mode": tracing_mode}}) if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) + func_kwargs.update({"KMSKeyArn": kms_key_arn}) # If VPC configuration is desired if vpc_subnet_ids: - - if 'VpcConfig' in current_config: + if "VpcConfig" in current_config: # Compare VPC config with current config - current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] - current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] + current_vpc_subnet_ids = current_config["VpcConfig"]["SubnetIds"] + current_vpc_security_group_ids = current_config["VpcConfig"]["SecurityGroupIds"] subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) - vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) + vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted( + current_vpc_security_group_ids + ) - if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: - new_vpc_config = {'SubnetIds': vpc_subnet_ids, - 'SecurityGroupIds': vpc_security_group_ids} - func_kwargs.update({'VpcConfig': new_vpc_config}) + if "VpcConfig" not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: + new_vpc_config = {"SubnetIds": vpc_subnet_ids, "SecurityGroupIds": vpc_security_group_ids} + func_kwargs.update({"VpcConfig": new_vpc_config}) else: # No VPC configuration is desired, assure VPC config is empty when present in current config - if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'): - func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}}) + if "VpcConfig" in current_config and current_config["VpcConfig"].get("VpcId"): + func_kwargs.update({"VpcConfig": {"SubnetIds": [], "SecurityGroupIds": []}}) # Check layers if layers: # compare two lists to see if the target layers are equal to the current - current_layers = current_config.get('Layers', []) - if Counter(layers) != Counter((f['Arn'] for f in current_layers)): - func_kwargs.update({'Layers': layers}) + current_layers = current_config.get("Layers", []) + if Counter(layers) != Counter((f["Arn"] for f in current_layers)): + func_kwargs.update({"Layers": layers}) # Upload new configuration if configuration has changed if len(func_kwargs) > 1: @@ -788,7 +776,7 @@ def main(): try: if not check_mode: response = client.update_function_configuration(aws_retry=True, **func_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to update lambda configuration") @@ -800,9 +788,8 @@ def main(): code_kwargs = _code_args(module, current_config) if code_kwargs: - # Update code configuration - code_kwargs.update({'FunctionName': name, 'Publish': True}) + code_kwargs.update({"FunctionName": name, "Publish": True}) if not check_mode: wait_for_lambda(client, module, name) @@ -810,7 +797,7 @@ def main(): try: if not check_mode: response = client.update_function_code(aws_retry=True, **code_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as 
e: module.fail_json_aws(e, msg="Trying to upload new code") @@ -818,59 +805,63 @@ def main(): # Describe function code and configuration response = get_current_function(client, name, qualifier=current_version) if not response: - module.fail_json(msg='Unable to get function information after updating') + module.fail_json(msg="Unable to get function information after updating") response = format_response(response) # We're done module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response) - # Function doesn't exists, create new Lambda function - elif state == 'present': - - func_kwargs = {'FunctionName': name, - 'Publish': True, - 'Runtime': runtime, - 'Role': role_arn, - 'Timeout': timeout, - 'MemorySize': memory_size, - } + # Function doesn't exist, create new Lambda function + elif state == "present": + func_kwargs = { + "FunctionName": name, + "Publish": True, + "Role": role_arn, + "Timeout": timeout, + "MemorySize": memory_size, + } code = _code_args(module, {}) if not code: - module.fail_json(msg='Either S3 object or path to zipfile required') - if 'Architectures' in code: - func_kwargs.update({'Architectures': code.pop('Architectures')}) - func_kwargs.update({'Code': code}) + module.fail_json(msg="Either S3 object or path to zipfile required") + if "Architectures" in code: + func_kwargs.update({"Architectures": code.pop("Architectures")}) + func_kwargs.update({"Code": code}) if description is not None: - func_kwargs.update({'Description': description}) + func_kwargs.update({"Description": description}) + + if image_uri is not None: + func_kwargs.update({"PackageType": "Image"}) + + if runtime is not None: + func_kwargs.update({"Runtime": runtime}) if handler is not None: - func_kwargs.update({'Handler': handler}) + func_kwargs.update({"Handler": handler}) if environment_variables: - func_kwargs.update({'Environment': {'Variables': environment_variables}}) + func_kwargs.update({"Environment": {"Variables": environment_variables}}) if dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) if tracing_mode: - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + func_kwargs.update({"TracingConfig": {"Mode": tracing_mode}}) if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) + func_kwargs.update({"KMSKeyArn": kms_key_arn}) # If VPC configuration is given if vpc_subnet_ids: - func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, - 'SecurityGroupIds': vpc_security_group_ids}}) + func_kwargs.update({"VpcConfig": {"SubnetIds": vpc_subnet_ids, "SecurityGroupIds": vpc_security_group_ids}}) # Layers if layers: - func_kwargs.update({'Layers': layers}) + func_kwargs.update({"Layers": layers}) # Tag Function if tags: - func_kwargs.update({'Tags': tags}) + func_kwargs.update({"Tags": tags}) # Function would have been created if not check mode if check_mode: @@ -880,19 +871,19 @@ def main(): current_version = None try: response = client.create_function(aws_retry=True, **func_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to create function") response = get_current_function(client, name, qualifier=current_version) if not response: - module.fail_json(msg='Unable to get function information after creating') + module.fail_json(msg="Unable to get function information after creating") response = 
format_response(response) module.exit_json(changed=changed, **response) # Delete existing Lambda function - if state == 'absent' and current_function: + if state == "absent" and current_function: try: if not check_mode: client.delete_function(FunctionName=name, aws_retry=True) @@ -903,9 +894,9 @@ def main(): module.exit_json(changed=changed) # Function already absent, do nothing - elif state == 'absent': + elif state == "absent": module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py index e2dd776d6..5b16eebd3 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_alias version_added: 5.0.0 @@ -50,13 +48,12 @@ options: type: int default: 0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" --- # Simple example to create a lambda function and publish a version - hosts: localhost @@ -68,58 +65,58 @@ EXAMPLES = ''' account: 123456789012 production_version: 5 tasks: - - name: AWS Lambda Function - amazon.aws.lambda: - state: "{{ state | default('present') }}" - name: myLambdaFunction - publish: True - description: lambda function description - code_s3_bucket: package-bucket - code_s3_key: "lambda/{{ deployment_package }}" - local_path: "{{ project_folder }}/{{ deployment_package }}" - runtime: python2.7 - timeout: 5 - handler: lambda.handler - memory_size: 128 - role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" - - - name: Get information - amazon.aws.lambda_info: - name: myLambdaFunction - register: lambda_info - - name: show results - ansible.builtin.debug: - msg: "{{ lambda_info['lambda_facts'] }}" - -# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) - - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Dev - description: Development is $LATEST version - -# The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST') - - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: QA - version: "{{ lambda_info.lambda_facts.Version }}" - description: "QA is version {{ lambda_info.lambda_facts.Version }}" - when: lambda_info.lambda_facts.Version != "$LATEST" - -# The Prod alias will have a fixed version based on a variable - - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Prod - version: "{{ production_version }}" - description: "Production is version {{ production_version }}" -''' - -RETURN = ''' + - name: AWS Lambda Function + amazon.aws.lambda: + state: "{{ state | default('present') }}" + name: myLambdaFunction + publish: true + description: lambda function description + code_s3_bucket: package-bucket + code_s3_key: "lambda/{{ deployment_package }}" + local_path: "{{ project_folder }}/{{ deployment_package }}" + runtime: python2.7 + timeout: 5 + handler: lambda.handler + memory_size: 128 + role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" + + - name: Get information + amazon.aws.lambda_info: + name: myLambdaFunction + register: lambda_info + - name: show results + ansible.builtin.debug: + msg: "{{ lambda_info['lambda_facts'] }}" + + # The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) + - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Dev + description: Development is $LATEST version + + # The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST') + - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: QA + version: "{{ lambda_info.lambda_facts.Version }}" + description: "QA is version {{ lambda_info.lambda_facts.Version }}" + when: lambda_info.lambda_facts.Version != "$LATEST" + + # The Prod alias will have a fixed version based on a variable + - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Prod + version: "{{ production_version }}" + description: "Production is version {{ production_version }}" +""" + +RETURN = r""" --- alias_arn: description: Full ARN of the function, including the alias @@ -146,7 +143,7 @@ revision_id: returned: success type: str sample: 12345678-1234-1234-1234-123456789abc -''' +""" import re @@ -158,142 +155,155 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +class LambdaAnsibleAWSError(AnsibleAWSError): + pass -def set_api_params(module, module_params): +def set_api_params(module_params, param_names): """ Sets non-None module parameters to those expected by the boto3 API. - :param module: :param module_params: + :param param_names: :return: """ api_params = dict() - for param in module_params: - module_param = module.params.get(param, None) + for param in param_names: + module_param = module_params.get(param, None) if module_param: api_params[param] = module_param return snake_dict_to_camel_dict(api_params, capitalize_first=True) -def validate_params(module): +def validate_params(module_params): """ Performs basic parameter validation. - :param module: AnsibleAWSModule reference + :param module_params: AnsibleAWSModule Parameters :return: """ - function_name = module.params['function_name'] + function_name = module_params["function_name"] # validate function name - if not re.search(r'^[\w\-:]+$', function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + if not re.search(r"^[\w\-:]+$", function_name): + raise LambdaAnsibleAWSError( + f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens." 
) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + raise LambdaAnsibleAWSError(f"Function name '{function_name}' exceeds 64 character limit") + return + + +def normalize_params(module_params): + params = dict(module_params) # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string - if module.params['function_version'] == 0: - module.params['function_version'] = '$LATEST' + if params["function_version"] == 0: + params["function_version"] = "$LATEST" else: - module.params['function_version'] = str(module.params['function_version']) + params["function_version"] = str(params["function_version"]) - return + return params -def get_lambda_alias(module, client): +def get_lambda_alias(module_params, client): """ Returns the lambda function alias if it exists. - :param module: AnsibleAWSModule + :param module_params: AnsibleAWSModule parameters :param client: (wrapped) boto3 lambda client :return: """ # set API parameters - api_params = set_api_params(module, ('function_name', 'name')) + api_params = set_api_params(module_params, ("function_name", "name")) # check if alias exists and get facts try: results = client.get_alias(aws_retry=True, **api_params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): results = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error retrieving function alias') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise LambdaAnsibleAWSError("Error retrieving function alias", exception=e) return results -def lambda_alias(module, client): +def lambda_alias(module_params, client, check_mode): """ Adds, updates or deletes lambda function aliases. 
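A note on the refactor in the hunks above: the lambda_alias helpers now take the plain parameters dict and raise LambdaAnsibleAWSError instead of calling module.fail_json() themselves, and main() translates the exception back into a module failure. A minimal, self-contained sketch of that pattern follows; the fail_json stand-in is hypothetical, filling in for AnsibleAWSModule.fail_json.

class LambdaAnsibleAWSError(Exception):
    # Simplified stand-in for the AnsibleAWSError subclass in the diff.
    def __init__(self, message, exception=None):
        super().__init__(message)
        self.message = message
        self.exception = exception


def validate_params(module_params):
    # Helpers raise instead of failing the module directly, so they can
    # be unit-tested without constructing an AnsibleAWSModule.
    function_name = module_params["function_name"]
    if len(function_name) > 64:
        raise LambdaAnsibleAWSError(f"Function name '{function_name}' exceeds 64 character limit")


def run(module_params, fail_json):
    # main() converts the exception back into an Ansible-style failure.
    try:
        validate_params(module_params)
    except LambdaAnsibleAWSError as e:
        fail_json(msg=e.message)


run({"function_name": "f" * 65}, lambda **kwargs: print(kwargs))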
- :param module: AnsibleAWSModule + :param module_params: AnsibleAWSModule parameters :param client: (wrapped) boto3 lambda client :return dict: """ results = dict() changed = False - current_state = 'absent' - state = module.params['state'] + current_state = "absent" + state = module_params["state"] - facts = get_lambda_alias(module, client) + facts = get_lambda_alias(module_params, client) if facts: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": snake_facts = camel_dict_to_snake_dict(facts) # check if alias has changed -- only version and description can change - alias_params = ('function_version', 'description') + alias_params = ("function_version", "description") for param in alias_params: - if module.params.get(param) is None: + if module_params.get(param) is None: continue - if module.params.get(param) != snake_facts.get(param): + if module_params.get(param) != snake_facts.get(param): changed = True break if changed: - api_params = set_api_params(module, ('function_name', 'name')) - api_params.update(set_api_params(module, alias_params)) + api_params = set_api_params(module_params, ("function_name", "name")) + api_params.update(set_api_params(module_params, alias_params)) - if not module.check_mode: + if not check_mode: try: results = client.update_alias(aws_retry=True, **api_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error updating function alias') + raise LambdaAnsibleAWSError("Error updating function alias", exception=e) else: # create new function alias - api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) + api_params = set_api_params(module_params, ("function_name", "name", "function_version", "description")) try: - if not module.check_mode: + if not check_mode: results = client.create_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating function alias') + raise LambdaAnsibleAWSError("Error creating function alias", exception=e) else: # state = 'absent' - if current_state == 'present': + if current_state == "present": # delete the function - api_params = set_api_params(module, ('function_name', 'name')) + api_params = set_api_params(module_params, ("function_name", "name")) try: - if not module.check_mode: + if not check_mode: results = client.delete_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error deleting function alias') + raise LambdaAnsibleAWSError("Error deleting function alias", exception=e) return dict(changed=changed, **dict(results or facts or {})) @@ -305,10 +315,10 @@ def main(): :return dict: ansible facts """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), function_name=dict(required=True), - name=dict(required=True, aliases=['alias_name']), - function_version=dict(type='int', required=False, default=0, aliases=['version']), + name=dict(required=True, aliases=["alias_name"]), + function_version=dict(type="int", required=False, default=0, aliases=["version"]), description=dict(required=False, default=None), ) @@ -319,13 +329,19 @@ def main(): 
required_together=[], ) - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) - validate_params(module) - results = lambda_alias(module, client) + try: + validate_params(module.params) + module_params = normalize_params(module.params) + results = lambda_alias(module_params, client, module.check_mode) + except LambdaAnsibleAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py index c6e63c4d8..c916ae8e8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_event version_added: 5.0.0 @@ -89,13 +87,12 @@ options: required: true type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Example that creates a lambda event notification for a DynamoDB stream - name: DynamoDB stream event mapping amazon.aws.lambda_event: @@ -105,7 +102,7 @@ EXAMPLES = ''' alias: Dev source_params: source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 - enabled: True + enabled: true batch_size: 100 starting_position: TRIM_HORIZON register: event @@ -118,7 +115,7 @@ EXAMPLES = ''' function_name: "{{ function_name }}" source_params: source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 - enabled: True + enabled: true batch_size: 100 starting_position: LATEST function_response_types: @@ -128,29 +125,30 @@ EXAMPLES = ''' - name: Show source event ansible.builtin.debug: var: event.lambda_stream_events -''' +""" -RETURN = ''' +RETURN = r""" --- lambda_stream_events: description: list of dictionaries returned by the API describing stream event mappings returned: success type: list -''' +""" import re try: - from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError + from botocore.exceptions import ClientError + from botocore.exceptions import MissingParametersError + from botocore.exceptions import ParamValidationError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.modules import 
AnsibleAWSModule # --------------------------------------------------------------------------------------------------- # @@ -165,38 +163,35 @@ class AWSConnection: """ def __init__(self, ansible_obj, resources, use_boto3=True): - try: self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3) self.resource_client = dict() if not resources: - resources = ['lambda'] + resources = ["lambda"] - resources.append('iam') + resources.append("iam") for resource in resources: - aws_connect_kwargs.update(dict(region=self.region, - endpoint=self.endpoint, - conn_type='client', - resource=resource - )) + aws_connect_kwargs.update( + dict(region=self.region, endpoint=self.endpoint, conn_type="client", resource=resource) + ) self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) # if region is not provided, then get default profile/session region if not self.region: - self.region = self.resource_client['lambda'].meta.region_name + self.region = self.resource_client["lambda"].meta.region_name except (ClientError, ParamValidationError, MissingParametersError) as e: - ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + ansible_obj.fail_json(msg=f"Unable to connect, authorize or access resource: {e}") # set account ID try: - self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] + self.account_id = self.resource_client["iam"].get_user()["User"]["Arn"].split(":")[4] except (ClientError, ValueError, KeyError, IndexError): - self.account_id = '' + self.account_id = "" - def client(self, resource='lambda'): + def client(self, resource="lambda"): return self.resource_client[resource] @@ -208,7 +203,7 @@ def pc(key): :return: """ - return "".join([token.capitalize() for token in key.split('_')]) + return "".join([token.capitalize() for token in key.split("_")]) def ordered_obj(obj): @@ -254,28 +249,28 @@ def validate_params(module, aws): :return: """ - function_name = module.params['lambda_function_arn'] + function_name = module.params["lambda_function_arn"] # validate function name - if not re.search(r'^[\w\-:]+$', function_name): + if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + msg=f"Function name {function_name} is invalid. 
Names must contain only alphanumeric characters and hyphens.", ) - if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + if len(function_name) > 64 and not function_name.startswith("arn:aws:lambda:"): + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') - elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name)) + elif len(function_name) > 140 and function_name.startswith("arn:aws:lambda:"): + module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit') # check if 'function_name' needs to be expanded in full ARN format - if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'): - function_name = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name) + if not module.params["lambda_function_arn"].startswith("arn:aws:lambda:"): + function_name = module.params["lambda_function_arn"] + module.params["lambda_function_arn"] = f"arn:aws:lambda:{aws.region}:{aws.account_id}:function:{function_name}" qualifier = get_qualifier(module) if qualifier: - function_arn = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + function_arn = module.params["lambda_function_arn"] + module.params["lambda_function_arn"] = f"{function_arn}:{qualifier}" return @@ -289,10 +284,10 @@ def get_qualifier(module): """ qualifier = None - if module.params['version'] > 0: - qualifier = str(module.params['version']) - elif module.params['alias']: - qualifier = str(module.params['alias']) + if module.params["version"] > 0: + qualifier = str(module.params["version"]) + elif module.params["alias"]: + qualifier = str(module.params["alias"]) return qualifier @@ -306,6 +301,7 @@ def get_qualifier(module): # # --------------------------------------------------------------------------------------------------- + def lambda_event_stream(module, aws): """ Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications. 
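The validate_params() hunk above now builds the fully qualified function ARN with f-strings. A standalone sketch of that expansion logic follows; the region and account values below are chosen purely for illustration.

def expand_function_arn(function_name, region, account_id, qualifier=None):
    # Bare function names are expanded to a full Lambda ARN; values that
    # already look like an ARN pass through unchanged.
    arn = function_name
    if not arn.startswith("arn:aws:lambda:"):
        arn = f"arn:aws:lambda:{region}:{account_id}:function:{function_name}"
    if qualifier:
        # A version number or alias is appended as the qualifier.
        arn = f"{arn}:{qualifier}"
    return arn


print(expand_function_arn("myFunction", "us-east-1", "123456789012", qualifier="Dev"))
# arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev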
@@ -314,49 +310,50 @@ def lambda_event_stream(module, aws): :return: """ - client = aws.client('lambda') + client = aws.client("lambda") facts = dict() changed = False - current_state = 'absent' - state = module.params['state'] + current_state = "absent" + state = module.params["state"] - api_params = dict(FunctionName=module.params['lambda_function_arn']) + api_params = dict(FunctionName=module.params["lambda_function_arn"]) # check if required sub-parameters are present and valid - source_params = module.params['source_params'] + source_params = module.params["source_params"] - source_arn = source_params.get('source_arn') + source_arn = source_params.get("source_arn") if source_arn: api_params.update(EventSourceArn=source_arn) else: module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") # check if optional sub-parameters are valid, if present - batch_size = source_params.get('batch_size') + batch_size = source_params.get("batch_size") if batch_size: try: - source_params['batch_size'] = int(batch_size) + source_params["batch_size"] = int(batch_size) except ValueError: - module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) + module.fail_json( + msg=f"Source parameter 'batch_size' must be an integer, found: {source_params['batch_size']}" + ) # optional boolean value needs special treatment as not present does not imply False - source_param_enabled = module.boolean(source_params.get('enabled', 'True')) + source_param_enabled = module.boolean(source_params.get("enabled", "True")) # check if event mapping exist try: - facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] + facts = client.list_event_source_mappings(**api_params)["EventSourceMappings"] if facts: - current_state = 'present' + current_state = "present" except ClientError as e: - module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) - - if state == 'present': - if current_state == 'absent': + module.fail_json(msg=f"Error retrieving stream event notification configuration: {e}") - starting_position = source_params.get('starting_position') + if state == "present": + if current_state == "absent": + starting_position = source_params.get("starting_position") if starting_position: api_params.update(StartingPosition=starting_position) - elif module.params.get('event_source') == 'sqs': + elif module.params.get("event_source") == "sqs": # starting position is not required for SQS pass else: @@ -364,37 +361,37 @@ def lambda_event_stream(module, aws): if source_arn: api_params.update(Enabled=source_param_enabled) - if source_params.get('batch_size'): - api_params.update(BatchSize=source_params.get('batch_size')) - if source_params.get('function_response_types'): - api_params.update(FunctionResponseTypes=source_params.get('function_response_types')) + if source_params.get("batch_size"): + api_params.update(BatchSize=source_params.get("batch_size")) + if source_params.get("function_response_types"): + api_params.update(FunctionResponseTypes=source_params.get("function_response_types")) try: if not module.check_mode: facts = client.create_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error creating stream source event mapping: {e}") else: # current_state is 'present' - api_params = 
dict(FunctionName=module.params['lambda_function_arn']) + api_params = dict(FunctionName=module.params["lambda_function_arn"]) current_mapping = facts[0] - api_params.update(UUID=current_mapping['UUID']) + api_params.update(UUID=current_mapping["UUID"]) mapping_changed = False # check if anything changed - if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: - api_params.update(BatchSize=source_params['batch_size']) + if source_params.get("batch_size") and source_params["batch_size"] != current_mapping["BatchSize"]: + api_params.update(BatchSize=source_params["batch_size"]) mapping_changed = True if source_param_enabled is not None: if source_param_enabled: - if current_mapping['State'] not in ('Enabled', 'Enabling'): + if current_mapping["State"] not in ("Enabled", "Enabling"): api_params.update(Enabled=True) mapping_changed = True else: - if current_mapping['State'] not in ('Disabled', 'Disabling'): + if current_mapping["State"] not in ("Disabled", "Disabling"): api_params.update(Enabled=False) mapping_changed = True @@ -404,19 +401,19 @@ def lambda_event_stream(module, aws): facts = client.update_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error updating stream source event mapping: {e}") else: - if current_state == 'present': + if current_state == "present": # remove the stream event mapping - api_params = dict(UUID=facts[0]['UUID']) + api_params = dict(UUID=facts[0]["UUID"]) try: if not module.check_mode: facts = client.delete_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error removing stream source event mapping: {e}") return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) @@ -426,32 +423,32 @@ def main(): source_choices = ["stream", "sqs"] argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), + state=dict(required=False, default="present", choices=["present", "absent"]), + lambda_function_arn=dict(required=True, aliases=["function_name", "function_arn"]), event_source=dict(required=False, default="stream", choices=source_choices), - source_params=dict(type='dict', required=True), + source_params=dict(type="dict", required=True), alias=dict(required=False, default=None), - version=dict(type='int', required=False, default=0), + version=dict(type="int", required=False, default=0), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['alias', 'version']], + mutually_exclusive=[["alias", "version"]], required_together=[], ) - aws = AWSConnection(module, ['lambda']) + aws = AWSConnection(module, ["lambda"]) validate_params(module, aws) - if module.params['event_source'].lower() in ('stream', 'sqs'): + if module.params["event_source"].lower() in ("stream", "sqs"): results = lambda_event_stream(module, aws) else: - module.fail_json(msg='Please select `stream` or `sqs` as the event type') + module.fail_json(msg="Please select `stream` or `sqs` as the event type") module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py index 68fff52b7..6b6ff11c5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_execute version_added: 5.0.0 @@ -18,8 +16,8 @@ description: The usage did not change. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 author: - "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>" @@ -73,9 +71,9 @@ options: - A dictionary in any form to be provided as input to the Lambda function. default: {} type: dict -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - amazon.aws.lambda_execute: name: test-function # the payload is automatically serialized and sent to the function @@ -109,9 +107,9 @@ EXAMPLES = ''' - amazon.aws.lambda_execute: name: test-function version_qualifier: PRODUCTION -''' +""" -RETURN = ''' +RETURN = r""" result: description: Resulting data structure from a successful task execution. returned: success @@ -131,7 +129,7 @@ result: type: int sample: 200 returned: always -''' +""" import base64 import json @@ -141,145 +139,152 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry def main(): argument_spec = dict( name=dict(), function_arn=dict(), - wait=dict(default=True, type='bool'), - tail_log=dict(default=False, type='bool'), - dry_run=dict(default=False, type='bool'), + wait=dict(default=True, type="bool"), + tail_log=dict(default=False, type="bool"), + dry_run=dict(default=False, type="bool"), version_qualifier=dict(), - payload=dict(default={}, type='dict'), + payload=dict(default={}, type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['name', 'function_arn'], - ], - required_one_of=[ - ('name', 'function_arn') + ["name", "function_arn"], ], + required_one_of=[("name", "function_arn")], ) - name = module.params.get('name') - function_arn = module.params.get('function_arn') - await_return = module.params.get('wait') - dry_run = module.params.get('dry_run') - tail_log = module.params.get('tail_log') - version_qualifier = module.params.get('version_qualifier') - payload = module.params.get('payload') + name = module.params.get("name") + function_arn = module.params.get("function_arn") + await_return = module.params.get("wait") + dry_run 
= module.params.get("dry_run") + tail_log = module.params.get("tail_log") + version_qualifier = module.params.get("version_qualifier") + payload = module.params.get("payload") try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") invoke_params = {} if await_return: # await response - invoke_params['InvocationType'] = 'RequestResponse' + invoke_params["InvocationType"] = "RequestResponse" else: # fire and forget - invoke_params['InvocationType'] = 'Event' + invoke_params["InvocationType"] = "Event" if dry_run or module.check_mode: # dry_run overrides invocation type - invoke_params['InvocationType'] = 'DryRun' + invoke_params["InvocationType"] = "DryRun" if tail_log and await_return: - invoke_params['LogType'] = 'Tail' + invoke_params["LogType"] = "Tail" elif tail_log and not await_return: - module.fail_json(msg="The `tail_log` parameter is only available if " - "the invocation waits for the function to complete. " - "Set `wait` to true or turn off `tail_log`.") + module.fail_json( + msg=( + "The `tail_log` parameter is only available if " + "the invocation waits for the function to complete. " + "Set `wait` to true or turn off `tail_log`." + ) + ) else: - invoke_params['LogType'] = 'None' + invoke_params["LogType"] = "None" if version_qualifier: - invoke_params['Qualifier'] = version_qualifier + invoke_params["Qualifier"] = version_qualifier if payload: - invoke_params['Payload'] = json.dumps(payload) + invoke_params["Payload"] = json.dumps(payload) if function_arn: - invoke_params['FunctionName'] = function_arn + invoke_params["FunctionName"] = function_arn elif name: - invoke_params['FunctionName'] = name + invoke_params["FunctionName"] = name if module.check_mode: module.exit_json(changed=True) try: - wait_for_lambda(client, module, name) + wait_for_lambda(client, module, name or function_arn) response = client.invoke(**invoke_params, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException') as nfe: - module.fail_json_aws(nfe, msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.") + except is_boto3_error_code("ResourceNotFoundException") as nfe: + module.fail_json_aws( + nfe, + msg=( + "Could not find Lambda to execute. Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function." 
+ ), + ) except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") - except Exception as e: + except botocore.exceptions.BotoCoreError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") results = { - 'logs': '', - 'status': response['StatusCode'], - 'output': '', + "logs": "", + "status": response["StatusCode"], + "output": "", } - if response.get('LogResult'): + if response.get("LogResult"): try: # logs are base64 encoded in the API response - results['logs'] = base64.b64decode(response.get('LogResult', '')) + results["logs"] = base64.b64decode(response.get("LogResult", "")) except Exception as e: module.fail_json_aws(e, msg="Failed while decoding logs") - if invoke_params['InvocationType'] == 'RequestResponse': + if invoke_params["InvocationType"] == "RequestResponse": try: - results['output'] = json.loads(response['Payload'].read().decode('utf8')) + results["output"] = json.loads(response["Payload"].read().decode("utf8")) except Exception as e: module.fail_json_aws(e, msg="Failed while decoding function return value") - if isinstance(results.get('output'), dict) and any( - [results['output'].get('stackTrace'), results['output'].get('errorMessage')]): + if isinstance(results.get("output"), dict) and any( + [results["output"].get("stackTrace"), results["output"].get("errorMessage")] + ): # AWS sends back stack traces and error messages when a function failed # in a RequestResponse (synchronous) context. - template = ("Function executed, but there was an error in the Lambda function. " - "Message: {errmsg}, Type: {type}, Stack Trace: {trace}") + template = ( + "Function executed, but there was an error in the Lambda function. 
" + "Message: {errmsg}, Type: {type}, Stack Trace: {trace}" + ) + error_data = { # format the stacktrace sent back as an array into a multiline string - 'trace': '\n'.join( - [' '.join([ - str(x) for x in line # cast line numbers to strings - ]) for line in results.get('output', {}).get('stackTrace', [])] - ), - 'errmsg': results['output'].get('errorMessage'), - 'type': results['output'].get('errorType') + "trace": "\n".join(results.get("output", {}).get("stackTrace", [])), + "errmsg": results["output"].get("errorMessage"), + "type": results["output"].get("errorType"), } module.fail_json(msg=template.format(**error_data), result=results) module.exit_json(changed=True, result=results) -def wait_for_lambda(client, module, name): +def wait_for_lambda(client, module, name_or_arn): try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') - client_active_waiter.wait(FunctionName=name) - client_updated_waiter.wait(FunctionName=name) + client_active_waiter = client.get_waiter("function_active") + client_updated_waiter = client.get_waiter("function_updated") + client_active_waiter.wait(FunctionName=name_or_arn) + client_updated_waiter.wait(FunctionName=name_or_arn) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') + module.fail_json_aws(e, msg="Timeout while waiting on lambda to be Active") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') + module.fail_json_aws(e, msg="Failed while waiting on lambda to be Active") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py index 4584624d9..83ba4feaa 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_info version_added: 5.0.0 @@ -36,12 +34,12 @@ options: author: - Pierre Jodouin (@pjodouin) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" --- # Simple example of listing all info for a function - name: List all for a specific function @@ -66,9 +64,9 @@ EXAMPLES = ''' - name: show Lambda information ansible.builtin.debug: msg: "{{ output['function'] }}" -''' +""" -RETURN = ''' +RETURN = r""" --- function: description: @@ -267,7 +265,8 @@ functions: 'subnet_ids': [], 'vpc_id': '123' } -''' +""" + import json import re @@ -278,9 +277,9 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() @@ -302,15 +301,29 @@ def alias_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=_paginate(client, "list_aliases", FunctionName=function_name)["Aliases"]) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get aliases") return camel_dict_to_snake_dict(lambda_info) +def _get_query(query, function_name): + # create default values for query if not specified. + # if function name exists, query should default to 'all'. + # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. + if query: + return query + if function_name: + return "all" + return "config" + + def list_functions(client, module): """ Returns queried facts for a specified function (or all functions). @@ -319,17 +332,17 @@ def list_functions(client, module): :param module: Ansible module reference """ - function_name = module.params.get('function_name') + function_name = module.params.get("function_name") if function_name: # Function name is specified - retrieve info on that function function_names = [function_name] else: # Function name is not specified - retrieve all function names - all_function_info = _paginate(client, 'list_functions')['Functions'] - function_names = [function_info['FunctionName'] for function_info in all_function_info] + all_function_info = _paginate(client, "list_functions")["Functions"] + function_names = [function_info["FunctionName"] for function_info in all_function_info] - query = module.params['query'] + query = _get_query(module.params["query"], function_name) functions = [] # keep returning deprecated response (dict of dicts) until removed @@ -342,22 +355,22 @@ def list_functions(client, module): # these details should be returned regardless of the query function.update(config_details(client, module, function_name)) - if query in ['all', 'aliases']: + if query in ["all", "aliases"]: function.update(alias_details(client, module, function_name)) - if query in ['all', 'policy']: + if query in ["all", "policy"]: function.update(policy_details(client, module, function_name)) - if query in ['all', 'versions']: + if query in ["all", "versions"]: function.update(version_details(client, module, function_name)) - if query in ['all', 'mappings']: + if query in ["all", "mappings"]: function.update(mapping_details(client, module, function_name)) - if query in ['all', 'tags']: + if query in ["all", "tags"]: function.update(tags_details(client, module, function_name)) - all_facts[function['function_name']] = function + all_facts[function["function_name"]] = function # add current lambda to list of lambdas functions.append(function) @@ -380,10 +393,13 @@ def 
config_details(client, module, function_name): try: lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} configuration") if "Environment" in lambda_info and "Variables" in lambda_info["Environment"]: env_vars = lambda_info["Environment"]["Variables"] @@ -408,16 +424,19 @@ def mapping_details(client, module, function_name): lambda_info = dict() params = dict() - params['FunctionName'] = function_name + params["FunctionName"] = function_name - if module.params.get('event_source_arn'): - params['EventSourceArn'] = module.params.get('event_source_arn') + if module.params.get("event_source_arn"): + params["EventSourceArn"] = module.params.get("event_source_arn") try: - lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(mappings=_paginate(client, "list_event_source_mappings", **params)["EventSourceMappings"]) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(mappings=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get source event mappings") return camel_dict_to_snake_dict(lambda_info) @@ -437,11 +456,14 @@ def policy_details(client, module, function_name): try: # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)["Policy"])) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} policy") return camel_dict_to_snake_dict(lambda_info) @@ -459,11 +481,16 @@ def version_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update( + versions=_paginate(client, "list_versions_by_function", FunctionName=function_name)["Versions"] + ) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 
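Each of the *_details() helpers above follows the same shape: treat ResourceNotFoundException as an empty result and fail only on other botocore errors. A sketch of that pattern, assuming botocore is installed; the real code expresses the code check through is_boto3_error_code from module_utils rather than inspecting the response directly.

from botocore.exceptions import ClientError


def get_versions(client, function_name):
    # A missing function yields an empty list; every other AWS error
    # propagates so the caller can turn it into a module failure.
    try:
        return client.list_versions_by_function(FunctionName=function_name)["Versions"]
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            return []
        raise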
# pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} versions") return camel_dict_to_snake_dict(lambda_info) @@ -481,11 +508,14 @@ def tags_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get("Tags", {})) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} tags") return camel_dict_to_snake_dict(lambda_info) @@ -497,49 +527,41 @@ def main(): :return dict: ansible facts """ argument_spec = dict( - function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), + function_name=dict(required=False, default=None, aliases=["function", "name"]), + query=dict( + required=False, choices=["aliases", "all", "config", "mappings", "policy", "versions", "tags"], default=None + ), event_source_arn=dict(required=False, default=None), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[], - required_together=[] + argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[], required_together=[] ) # validate function_name if present - function_name = module.params['function_name'] + function_name = module.params["function_name"] if function_name: if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - # create default values for query if not specified. - # if function name exists, query should default to 'all'. - # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. - if not module.params.get('query'): - if function_name: - module.params['query'] = 'all' - else: - module.params['query'] = 'config' + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts module.deprecate( - "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," - " which returns a list of dictionaries. 
Both keys are returned for now.", - date='2025-01-01', - collection_name='amazon.aws' + ( + "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be" + " replaced by 'functions', which returns a list of dictionaries. Both keys are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", ) list_functions(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py index 2813a45da..e727277de 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_layer version_added: 5.5.0 @@ -93,13 +91,12 @@ options: I(license_info), I(compatible_architectures). type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" --- # Create a new Python library layer version from a zip archive located into a S3 bucket - name: Create a new python library layer @@ -145,9 +142,9 @@ EXAMPLES = ''' state: absent name: test-layer version: -1 -''' +""" -RETURN = ''' +RETURN = r""" layer_version: description: info about the layer version that was created or deleted. returned: always @@ -220,7 +217,7 @@ layer_version: description: A list of compatible instruction set architectures. returned: if it was defined for the layer version. 
type: list -''' +""" try: import botocore @@ -229,13 +226,13 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() def _list_layer_versions(client, **params): - paginator = client.get_paginator('list_layer_versions') + paginator = client.get_paginator("list_layer_versions") return paginator.paginate(**params).build_full_result() @@ -247,12 +244,11 @@ class LambdaLayerFailure(Exception): def list_layer_versions(lambda_client, name): - try: - layer_versions = _list_layer_versions(lambda_client, LayerName=name)['LayerVersions'] + layer_versions = _list_layer_versions(lambda_client, LayerName=name)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerFailure(e, "Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerFailure(e, f"Unable to list layer versions for name {name}") def create_layer_version(lambda_client, params, check_mode=False): @@ -261,10 +257,10 @@ def create_layer_version(lambda_client, params, check_mode=False): opt = {"LayerName": params.get("name"), "Content": {}} keys = [ - ('description', 'Description'), - ('compatible_runtimes', 'CompatibleRuntimes'), - ('license_info', 'LicenseInfo'), - ('compatible_architectures', 'CompatibleArchitectures'), + ("description", "Description"), + ("compatible_runtimes", "CompatibleRuntimes"), + ("license_info", "LicenseInfo"), + ("compatible_architectures", "CompatibleArchitectures"), ] for k, d in keys: if params.get(k) is not None: @@ -303,14 +299,14 @@ def delete_layer_version(lambda_client, params, check_mode=False): try: lambda_client.delete_layer_version(LayerName=name, VersionNumber=layer["version"]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - LambdaLayerFailure(e, "Failed to delete layer version LayerName={0}, VersionNumber={1}.".format(name, version)) + raise LambdaLayerFailure( + e, f"Failed to delete layer version LayerName={name}, VersionNumber={version}." 
+ ) return {"changed": changed, "layer_versions": deleted_versions} def execute_module(module, lambda_client): - try: - state = module.params.get("state") f_operation = create_layer_version if state == "absent": @@ -334,9 +330,9 @@ def main(): s3_object_version=dict(type="str"), zip_file=dict(type="path"), ), - required_together=[['s3_bucket', 's3_key']], - required_one_of=[['s3_bucket', 'zip_file']], - mutually_exclusive=[['s3_bucket', 'zip_file']], + required_together=[["s3_bucket", "s3_key"]], + required_one_of=[["s3_bucket", "zip_file"]], + mutually_exclusive=[["s3_bucket", "zip_file"]], ), compatible_runtimes=dict(type="list", elements="str"), license_info=dict(type="str"), @@ -351,18 +347,18 @@ def main(): ("state", "absent", ["version"]), ], mutually_exclusive=[ - ['version', 'description'], - ['version', 'content'], - ['version', 'compatible_runtimes'], - ['version', 'license_info'], - ['version', 'compatible_architectures'], + ["version", "description"], + ["version", "content"], + ["version", "compatible_runtimes"], + ["version", "license_info"], + ["version", "compatible_architectures"], ], supports_check_mode=True, ) - lambda_client = module.client('lambda') + lambda_client = module.client("lambda") execute_module(module, lambda_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py index ded4c9aab..9894a93a2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_layer_info version_added: 5.5.0 @@ -23,6 +21,14 @@ options: type: str aliases: - layer_name + version_number: + description: + - The Lambda layer version number to retrieve. + - Requires I(name) to be provided. + type: int + aliases: + - layer_version + version_added: 6.0.0 compatible_runtime: description: - A runtime identifier. @@ -39,13 +45,12 @@ options: - Specify this option with I(name) to include only layer versions that are compatible with that architecture. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" --- # Display information about the versions for the layer named blank-java-lib - name: Retrieve layer versions @@ -66,9 +71,15 @@ EXAMPLES = ''' - name: list latest versions for all layers amazon.aws.lambda_layer_info: compatible_runtime: python3.7 -''' -RETURN = ''' +# Retrieve specific lambda layer information +- name: Get lambda layer version information + amazon.aws.lambda_layer_info: + name: my-layer + version_number: 1 +""" + +RETURN = r""" layers_versions: description: - The layers versions that exists. @@ -114,7 +125,31 @@ layers_versions: description: A list of compatible instruction set architectures. returned: if it was defined for the layer version. type: list -''' + content: + description: Details about the layer version. 
+ returned: if I(version_number) was provided + type: complex + version_added: 6.0.0 + contains: + location: + description: A link to the layer archive in Amazon S3 that is valid for 10 minutes. + type: str + sample: 'https://awslambda-us-east-2-layers.s3.us-east-2.amazonaws.com/snapshots/123456789012/mylayer-4aaa2fbb-96a?versionId=27iWyA73c...' + code_sha256: + description: The SHA-256 hash of the layer archive. + type: str + sample: 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=' + code_size: + description: The size of the layer archive in bytes. + type: int + sample: 169 + signing_profile_version_arn: + description: The Amazon Resource Name (ARN) for a signing profile version. + type: str + signing_job_arn: + description: The Amazon Resource Name (ARN) of a signing job. + type: str +""" try: import botocore @@ -123,19 +158,19 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() def _list_layer_versions(client, **params): - paginator = client.get_paginator('list_layer_versions') + paginator = client.get_paginator("list_layer_versions") return paginator.paginate(**params).build_full_result() @AWSRetry.jittered_backoff() def _list_layers(client, **params): - paginator = client.get_paginator('list_layers') + paginator = client.get_paginator("list_layers") return paginator.paginate(**params).build_full_result() @@ -147,28 +182,26 @@ class LambdaLayerInfoFailure(Exception): def list_layer_versions(lambda_client, name, compatible_runtime=None, compatible_architecture=None): - params = {"LayerName": name} if compatible_runtime: params["CompatibleRuntime"] = compatible_runtime if compatible_architecture: params["CompatibleArchitecture"] = compatible_architecture try: - layer_versions = _list_layer_versions(lambda_client, **params)['LayerVersions'] + layer_versions = _list_layer_versions(lambda_client, **params)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layer versions for name {name}") def list_layers(lambda_client, compatible_runtime=None, compatible_architecture=None): - params = {} if compatible_runtime: params["CompatibleRuntime"] = compatible_runtime if compatible_architecture: params["CompatibleArchitecture"] = compatible_architecture try: - layers = _list_layers(lambda_client, **params)['Layers'] + layers = _list_layers(lambda_client, **params)["Layers"] layer_versions = [] for item in layers: layer = {key: value for key, value in item.items() if key != "LatestMatchingVersion"} @@ -176,26 +209,40 @@ def list_layers(lambda_client, compatible_runtime=None, compatible_architecture= layer_versions.append(camel_dict_to_snake_dict(layer)) return layer_versions except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layers {0}".format(params)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layers {params}") 
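# Editor's sketch (not part of the patch): what the list_layers() flattening above
# amounts to, assuming a typical boto3 list_layers payload and assuming the nested
# LatestMatchingVersion fields are merged into each layer before
# camel_dict_to_snake_dict() runs (part of the loop body falls outside the hunks
# shown). All concrete names and values here are illustrative only:
#
#   item = {
#       "LayerName": "my-layer",
#       "LayerArn": "arn:aws:lambda:us-east-1:123456789012:layer:my-layer",
#       "LatestMatchingVersion": {"Version": 3, "CompatibleRuntimes": ["python3.7"]},
#   }
#   # keep everything except the nested key, then fold its fields in
#   layer = {key: value for key, value in item.items() if key != "LatestMatchingVersion"}
#   layer.update(item["LatestMatchingVersion"])
#   camel_dict_to_snake_dict(layer)
#   # -> {"layer_name": "my-layer", "layer_arn": "arn:...", "version": 3,
#   #     "compatible_runtimes": ["python3.7"]}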
-def execute_module(module, lambda_client): +def get_layer_version(lambda_client, layer_name, version_number): + try: + layer_version = lambda_client.get_layer_version(LayerName=layer_name, VersionNumber=version_number) + if layer_version: + layer_version.pop("ResponseMetadata") + return [camel_dict_to_snake_dict(layer_version)] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise LambdaLayerInfoFailure(exc=e, msg="get_layer_version() failed.") - params = {} - f_operation = list_layers + +def execute_module(module, lambda_client): name = module.params.get("name") - if name is not None: - f_operation = list_layer_versions - params["name"] = name - compatible_runtime = module.params.get("compatible_runtime") - if compatible_runtime is not None: - params["compatible_runtime"] = compatible_runtime - compatible_architecture = module.params.get("compatible_architecture") - if compatible_architecture is not None: - params["compatible_architecture"] = compatible_architecture + version_number = module.params.get("version_number") try: - result = f_operation(lambda_client, **params) + if name is not None and version_number is not None: + result = get_layer_version(lambda_client, name, version_number) + else: + params = {} + f_operation = list_layers + if name is not None: + f_operation = list_layer_versions + params["name"] = name + compatible_runtime = module.params.get("compatible_runtime") + if compatible_runtime is not None: + params["compatible_runtime"] = compatible_runtime + compatible_architecture = module.params.get("compatible_architecture") + if compatible_architecture is not None: + params["compatible_architecture"] = compatible_architecture + result = f_operation(lambda_client, **params) + module.exit_json(changed=False, layers_versions=result) except LambdaLayerInfoFailure as e: module.fail_json_aws(exception=e.exc, msg=e.msg) @@ -206,16 +253,16 @@ def main(): name=dict(type="str", aliases=["layer_name"]), compatible_runtime=dict(type="str"), compatible_architecture=dict(type="str"), + version_number=dict(type="int", aliases=["layer_version"]), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True, required_by=dict(version_number=("name",)) ) - lambda_client = module.client('lambda') + lambda_client = module.client("lambda") execute_module(module, lambda_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py index 38fbef325..3413d6e79 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions> # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_policy version_added: 5.0.0 @@ -97,13 +95,12 @@ options: type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Lambda S3 event notification 
amazon.aws.lambda_policy: @@ -120,15 +117,15 @@ EXAMPLES = ''' - name: show results ansible.builtin.debug: var: lambda_policy_action -''' +""" -RETURN = ''' +RETURN = r""" --- lambda_policy_action: description: describes what action was taken returned: success type: str -''' +""" import json import re @@ -139,8 +136,9 @@ except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def pc(key): @@ -151,11 +149,11 @@ def pc(key): :return: """ - return "".join([token.capitalize() for token in key.split('_')]) + return "".join([token.capitalize() for token in key.split("_")]) def policy_equal(module, current_statement): - for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'): + for param in ("action", "principal", "source_arn", "source_account", "event_source_token"): if module.params.get(param) != current_statement.get(param): return False @@ -189,25 +187,23 @@ def validate_params(module): :return: """ - function_name = module.params['function_name'] + function_name = module.params["function_name"] # validate function name - if function_name.startswith('arn:'): - if not re.search(r'^[\w\-:]+$', function_name): + if function_name.startswith("arn:"): + if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name) + msg=f"ARN {function_name} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.", ) if len(function_name) > 140: - module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) + module.fail_json(msg=f'ARN name "{function_name}" exceeds 140 character limit') else: - if not re.search(r'^[\w\-]+$', function_name): + if not re.search(r"^[\w\-]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format( - function_name) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json( - msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') def get_qualifier(module): @@ -218,10 +214,10 @@ def get_qualifier(module): :return: """ - if module.params.get('version') is not None: - return to_native(module.params['version']) - elif module.params['alias']: - return to_native(module.params['alias']) + if module.params.get("version") is not None: + return to_native(module.params["version"]) + elif module.params["alias"]: + return to_native(module.params["alias"]) return None @@ -233,32 +229,34 @@ def extract_statement(policy, sid): return it in a flattened form. Otherwise return an empty dictionary. """ - if 'Statement' not in policy: + if "Statement" not in policy: return {} policy_statement = {} # Now that we have the policy, check if required permission statement is present and flatten to # simple dictionary if found. 
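# Editor's sketch (not part of the patch): the flattened dictionary the loop below
# produces for a typical S3-invoke statement; the ARN and account values are
# invented for illustration:
#
#   {
#       "action": "lambda:InvokeFunction",
#       "principal": "s3.amazonaws.com",
#       "source_arn": "arn:aws:s3:::my-bucket",
#       "source_account": "123456789012",
#   }
#
# policy_equal() above compares exactly these keys against module.params, so this
# flattening is what decides whether an existing statement is considered up to date.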
- for statement in policy['Statement']: - if statement['Sid'] == sid: - policy_statement['action'] = statement['Action'] + for statement in policy["Statement"]: + if statement["Sid"] == sid: + policy_statement["action"] = statement["Action"] try: - policy_statement['principal'] = statement['Principal']['Service'] + policy_statement["principal"] = statement["Principal"]["Service"] except KeyError: pass try: - policy_statement['principal'] = statement['Principal']['AWS'] + policy_statement["principal"] = statement["Principal"]["AWS"] except KeyError: pass try: - policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn'] + policy_statement["source_arn"] = statement["Condition"]["ArnLike"]["AWS:SourceArn"] except KeyError: pass try: - policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount'] + policy_statement["source_account"] = statement["Condition"]["StringEquals"]["AWS:SourceAccount"] except KeyError: pass try: - policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken'] + policy_statement["event_source_token"] = statement["Condition"]["StringEquals"][ + "lambda:EventSourceToken" + ] except KeyError: pass break @@ -273,10 +271,10 @@ def get_policy_statement(module, client): :param client: :return: """ - sid = module.params['statement_id'] + sid = module.params["statement_id"] # set API parameters - api_params = set_api_params(module, ('function_name', )) + api_params = set_api_params(module, ("function_name",)) qualifier = get_qualifier(module) if qualifier: api_params.update(Qualifier=qualifier) @@ -285,13 +283,16 @@ def get_policy_statement(module, client): # check if function policy exists try: policy_results = client.get_policy(**api_params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="retrieving function policy") # get_policy returns a JSON string so must convert to dict before reassigning to its key - policy = json.loads(policy_results.get('Policy', '{}')) + policy = json.loads(policy_results.get("Policy", "{}")) return extract_statement(policy, sid) @@ -308,13 +309,14 @@ def add_policy_permission(module, client): # set API parameters params = ( - 'function_name', - 'statement_id', - 'action', - 'principal', - 'source_arn', - 'source_account', - 'event_source_token') + "function_name", + "statement_id", + "action", + "principal", + "source_arn", + "source_account", + "event_source_token", + ) api_params = set_api_params(module, params) qualifier = get_qualifier(module) if qualifier: @@ -342,7 +344,7 @@ def remove_policy_permission(module, client): changed = False # set API parameters - api_params = set_api_params(module, ('function_name', 'statement_id')) + api_params = set_api_params(module, ("function_name", "statement_id")) qualifier = get_qualifier(module) if qualifier: api_params.update(Qualifier=qualifier) @@ -359,40 +361,44 @@ def remove_policy_permission(module, client): def manage_state(module, lambda_client): changed = False - current_state = 'absent' - state = module.params['state'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + action_taken = "none" # check if the policy 
exists current_policy_statement = get_policy_statement(module, lambda_client) if current_policy_statement: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present' and not policy_equal(module, current_policy_statement): + if state == "present": + if current_state == "present" and not policy_equal(module, current_policy_statement): remove_policy_permission(module, lambda_client) changed = add_policy_permission(module, lambda_client) - action_taken = 'updated' - if not current_state == 'present': + action_taken = "updated" + if not current_state == "present": changed = add_policy_permission(module, lambda_client) - action_taken = 'added' - elif current_state == 'present': + action_taken = "added" + elif current_state == "present": # remove the policy statement changed = remove_policy_permission(module, lambda_client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken)) def setup_module_object(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']), - statement_id=dict(required=True, aliases=['sid']), + state=dict(default="present", choices=["present", "absent"]), + function_name=dict(required=True, aliases=["lambda_function_arn", "function_arn"]), + statement_id=dict(required=True, aliases=["sid"]), alias=dict(), - version=dict(type='int'), - action=dict(required=True, ), - principal=dict(required=True, ), + version=dict(type="int"), + action=dict( + required=True, + ), + principal=dict( + required=True, + ), source_arn=dict(), source_account=dict(), event_source_token=dict(no_log=False), @@ -401,9 +407,11 @@ def setup_module_object(): return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['alias', 'version'], - ['event_source_token', 'source_arn'], - ['event_source_token', 'source_account']], + mutually_exclusive=[ + ["alias", "version"], + ["event_source_token", "source_arn"], + ["event_source_token", "source_account"], + ], ) @@ -415,12 +423,12 @@ def main(): """ module = setup_module_object() - client = module.client('lambda') + client = module.client("lambda") validate_params(module) results = manage_state(module, client) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py index 5eec23c88..0e5634e59 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # Copyright (c) 2022 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_cluster version_added: 5.0.0 @@ -16,8 +14,8 @@ description: - Create, modify, and delete RDS clusters. - This module was originally added to C(community.aws) in release 3.2.0. 
extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
 author:
@@ -26,8 +24,11 @@ author:
 options:
   # General module options
   state:
-    description: Whether the snapshot should exist or not.
-    choices: ['present', 'absent']
+    description:
+      - Whether the snapshot should exist or not.
+      - C(started) and C(stopped) can only be used with Aurora clusters.
+      - Support for C(started) and C(stopped) was added in release 6.3.0.
+    choices: ['present', 'absent', 'started', 'stopped']
     default: 'present'
     type: str
   creation_source:
@@ -257,7 +258,7 @@ options:
   master_user_password:
     description:
       - An 8-41 character password for the master database user.
-      - The password can contain any printable ASCII character except "/", """, or "@".
+      - The password can contain any printable ASCII character except C(/), C("), or C(@).
      - To modify the password use I(force_update_password). Use I(apply_immediately) to change
        the password immediately, otherwise it is updated during the next maintenance window.
    aliases:
@@ -304,6 +305,13 @@ options:
    aliases:
      - maintenance_window
    type: str
+  remove_from_global_db:
+    description:
+      - If set to C(true), the cluster will be removed from global DB.
+      - Parameters I(global_cluster_identifier), I(db_cluster_identifier) must be specified when I(remove_from_global_db=true).
+    type: bool
+    required: false
+    version_added: 6.5.0
   replication_source_identifier:
     description:
       - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.
@@ -342,6 +350,24 @@ options:
      - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster.
      - If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket.
    type: str
+  serverless_v2_scaling_configuration:
+    description:
+      - Contains the scaling configuration of an Aurora Serverless v2 DB cluster.
+    type: dict
+    suboptions:
+      min_capacity:
+        description:
+          - The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster.
+          - ACU values can be specified in half-step increments, such as C(8), C(8.5), C(9), and so on.
+          - The smallest possible value is C(0.5).
+        type: float
+      max_capacity:
+        description:
+          - The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster.
+          - ACU values can be specified in half-step increments, such as C(40), C(40.5), C(41), and so on.
+          - The largest possible value is C(128).
+        type: float
+    version_added: 7.3.0
   skip_final_snapshot:
     description:
      - Whether a final DB cluster snapshot is created before the DB cluster is deleted.
@@ -390,9 +416,9 @@ options:
      - A list of EC2 VPC security groups to associate with the DB cluster.
    type: list
    elements: str
-'''
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
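# Editor's sketch, not from the upstream patch: a task exercising the new
# serverless_v2_scaling_configuration option documented above (added in 7.3.0).
# The cluster name and capacity values are illustrative assumptions only.
- name: Create an Aurora Serverless v2 capable cluster (sketch)
  amazon.aws.rds_cluster:
    cluster_id: serverless-v2-demo
    engine: aurora-postgresql
    username: "{{ username }}"
    password: "{{ password }}"
    serverless_v2_scaling_configuration:
      min_capacity: 0.5
      max_capacity: 4.0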
- name: Create minimal aurora cluster in default VPC and default subnet group
  amazon.aws.rds_cluster:
@@ -432,7 +458,7 @@ EXAMPLES = r'''
     password: "{{ password }}"
     username: "{{ username }}"
     cluster_id: "{{ cluster_id }}"
-    skip_final_snapshot: True
+    skip_final_snapshot: true
     tags:
       Name: "cluster-{{ resource_prefix }}"
       Created_By: "Ansible_rds_cluster_integration_test"
@@ -462,9 +488,45 @@
     engine: aurora-postgresql
     state: present
     db_instance_class: 'db.t3.medium'
-'''
-RETURN = r'''
+- name: Remove a cluster from global DB (do not delete)
+  amazon.aws.rds_cluster:
+    db_cluster_identifier: '{{ cluster_id }}'
+    global_cluster_identifier: '{{ global_cluster_id }}'
+    remove_from_global_db: true
+
+- name: Remove a cluster from global DB and delete it without creating a final snapshot
+  amazon.aws.rds_cluster:
+    engine: aurora
+    password: "{{ password }}"
+    username: "{{ username }}"
+    cluster_id: "{{ cluster_id }}"
+    skip_final_snapshot: true
+    remove_from_global_db: true
+    wait: true
+    state: absent
+
+- name: Update cluster port and WAIT for removal of the secondary DB cluster from the global DB to complete
+  amazon.aws.rds_cluster:
+    db_cluster_identifier: "{{ secondary_cluster_name }}"
+    global_cluster_identifier: "{{ global_cluster_name }}"
+    remove_from_global_db: true
+    state: present
+    port: 3389
+    region: "{{ secondary_cluster_region }}"
+
+- name: Update cluster port and DO NOT WAIT for removal of the secondary DB cluster from the global DB to complete
+  amazon.aws.rds_cluster:
+    db_cluster_identifier: "{{ secondary_cluster_name }}"
+    global_cluster_identifier: "{{ global_cluster_name }}"
+    remove_from_global_db: true
+    state: present
+    port: 3389
+    region: "{{ secondary_cluster_region }}"
+    wait: false
+"""
+
+RETURN = r"""
 activity_stream_status:
   description: The status of the database activity stream.
   returned: always
@@ -646,6 +708,15 @@ reader_endpoint:
   description: The reader endpoint for the DB cluster.
   returned: always
   type: str
   sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com
+serverless_v2_scaling_configuration:
+  description: The scaling configuration for an Aurora Serverless v2 DB cluster.
+  returned: when configured
+  type: dict
+  sample: {
+      "max_capacity": 4.5,
+      "min_capacity": 2.5
+  }
+  version_added: 7.3.0
 status:
   description: The status of the DB cluster.
returned: always @@ -689,8 +760,7 @@ vpc_security_groups: returned: always type: str sample: sg-12345678 -''' - +""" try: import botocore @@ -699,40 +769,40 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list @AWSRetry.jittered_backoff(retries=10) def _describe_db_clusters(**params): try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'][0] - except is_boto3_error_code('DBClusterNotFoundFault'): + paginator = client.get_paginator("describe_db_clusters") + return paginator.paginate(**params).build_full_result()["DBClusters"][0] + except is_boto3_error_code("DBClusterNotFoundFault"): return {} def get_add_role_options(params_dict, cluster): - current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])] - role = params_dict['RoleArn'] + current_role_arns = [role["RoleArn"] for role in cluster.get("AssociatedRoles", [])] + role = params_dict["RoleArn"] if role is not None and role not in current_role_arns: - return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']} + return {"RoleArn": role, "DBClusterIdentifier": params_dict["DBClusterIdentifier"]} return {} def get_backtrack_options(params_dict): - options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable'] - if params_dict['BacktrackTo'] is not None: + options = ["BacktrackTo", "DBClusterIdentifier", "UseEarliestTimeOnPointInTimeUnavailable"] + if params_dict["BacktrackTo"] is not None: options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None) - if 'ForceBacktrack' in params_dict: - options['Force'] = params_dict['ForceBacktrack'] + if "ForceBacktrack" in params_dict: + options["Force"] = params_dict["ForceBacktrack"] return options return {} @@ -772,6 +842,13 @@ def get_create_options(params_dict): "Domain", "DomainIAMRoleName", "EnableGlobalWriteForwarding", + "GlobalClusterIdentifier", + "AllocatedStorage", + "DBClusterInstanceClass", + "StorageType", + "Iops", + "EngineMode", + "ServerlessV2ScalingConfiguration", ] return 
dict((k, v) for k, v in params_dict.items() if k in options and v is not None) @@ -779,34 +856,80 @@ def get_create_options(params_dict): def get_modify_options(params_dict, force_update_password): options = [ - 'ApplyImmediately', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'EnableIAMDatabaseAuthentication', - 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'NewDBClusterIdentifier', - 'OptionGroupName', 'Port', 'VpcSecurityGroupIds', 'EnableIAMDatabaseAuthentication', - 'CloudwatchLogsExportConfiguration', 'DeletionProtection', 'EnableHttpEndpoint', - 'CopyTagsToSnapshot', 'EnableGlobalWriteForwarding', 'Domain', 'DomainIAMRoleName', + "ApplyImmediately", + "BacktrackWindow", + "BackupRetentionPeriod", + "PreferredBackupWindow", + "DBClusterIdentifier", + "DBClusterParameterGroupName", + "EnableIAMDatabaseAuthentication", + "EngineVersion", + "PreferredMaintenanceWindow", + "MasterUserPassword", + "NewDBClusterIdentifier", + "OptionGroupName", + "Port", + "VpcSecurityGroupIds", + "EnableIAMDatabaseAuthentication", + "CloudwatchLogsExportConfiguration", + "DeletionProtection", + "EnableHttpEndpoint", + "CopyTagsToSnapshot", + "EnableGlobalWriteForwarding", + "Domain", + "DomainIAMRoleName", + "AllocatedStorage", + "DBClusterInstanceClass", + "StorageType", + "Iops", + "EngineMode", + "ServerlessV2ScalingConfiguration", ] modify_options = dict((k, v) for k, v in params_dict.items() if k in options and v is not None) if not force_update_password: - modify_options.pop('MasterUserPassword', None) + modify_options.pop("MasterUserPassword", None) return modify_options def get_delete_options(params_dict): - options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot'] + options = ["DBClusterIdentifier", "FinalSnapshotIdentifier", "SkipFinalSnapshot"] return dict((k, params_dict[k]) for k in options if params_dict[k] is not None) def get_restore_s3_options(params_dict): options = [ - 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName', - 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion', - 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port', - 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn', - 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags', - 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot', - 'Domain', 'DomainIAMRoleName', + "AvailabilityZones", + "BacktrackWindow", + "BackupRetentionPeriod", + "CharacterSetName", + "DBClusterIdentifier", + "DBClusterParameterGroupName", + "DBSubnetGroupName", + "DatabaseName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "Engine", + "EngineVersion", + "KmsKeyId", + "MasterUserPassword", + "MasterUsername", + "OptionGroupName", + "Port", + "PreferredBackupWindow", + "PreferredMaintenanceWindow", + "S3BucketName", + "S3IngestionRoleArn", + "S3Prefix", + "SourceEngine", + "SourceEngineVersion", + "StorageEncrypted", + "Tags", + "VpcSecurityGroupIds", + "DeletionProtection", + "EnableHttpEndpoint", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) @@ -814,52 +937,88 @@ def get_restore_s3_options(params_dict): def 
get_restore_snapshot_options(params_dict): options = [ - 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', - 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', - 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier', - 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection', - 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', + "AvailabilityZones", + "BacktrackWindow", + "DBClusterIdentifier", + "DBSubnetGroupName", + "DatabaseName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "Engine", + "EngineVersion", + "KmsKeyId", + "OptionGroupName", + "Port", + "SnapshotIdentifier", + "Tags", + "VpcSecurityGroupIds", + "DBClusterParameterGroupName", + "DeletionProtection", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) def get_restore_cluster_options(params_dict): options = [ - 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports', - 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime', - 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime', - 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain', - 'DomainIAMRoleName', + "BacktrackWindow", + "DBClusterIdentifier", + "DBSubnetGroupName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "KmsKeyId", + "OptionGroupName", + "Port", + "RestoreToTime", + "RestoreType", + "SourceDBClusterIdentifier", + "Tags", + "UseLatestRestorableTime", + "VpcSecurityGroupIds", + "DeletionProtection", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) def get_rds_method_attribute_name(cluster): - state = module.params['state'] - creation_source = module.params['creation_source'] + state = module.params["state"] + creation_source = module.params["creation_source"] method_name = None method_options_name = None - if state == 'absent': - if cluster and cluster['Status'] not in ['deleting', 'deleted']: - method_name = 'delete_db_cluster' - method_options_name = 'get_delete_options' + if state == "absent": + if cluster and cluster["Status"] not in ["deleting", "deleted"]: + method_name = "delete_db_cluster" + method_options_name = "get_delete_options" + elif state == "started": + if cluster and cluster["Status"] not in ["starting", "started", "available"]: + method_name = "start_db_cluster" + method_options_name = "get_modify_options" + elif state == "stopped": + if cluster and cluster["Status"] not in ["stopping", "stopped"]: + method_name = "stop_db_cluster" + method_options_name = "get_modify_options" else: if cluster: - method_name = 'modify_db_cluster' - method_options_name = 'get_modify_options' - elif creation_source == 'snapshot': - method_name = 'restore_db_cluster_from_snapshot' - method_options_name = 'get_restore_snapshot_options' - elif creation_source == 's3': - method_name = 'restore_db_cluster_from_s3' - method_options_name = 'get_restore_s3_options' - elif creation_source == 'cluster': - method_name = 'restore_db_cluster_to_point_in_time' - method_options_name = 'get_restore_cluster_options' + method_name = "modify_db_cluster" + method_options_name = "get_modify_options" + elif creation_source == "snapshot": + method_name = "restore_db_cluster_from_snapshot" + 
method_options_name = "get_restore_snapshot_options" + elif creation_source == "s3": + method_name = "restore_db_cluster_from_s3" + method_options_name = "get_restore_s3_options" + elif creation_source == "cluster": + method_name = "restore_db_cluster_to_point_in_time" + method_options_name = "get_restore_cluster_options" else: - method_name = 'create_db_cluster' - method_options_name = 'get_create_options' + method_name = "create_db_cluster" + method_options_name = "get_create_options" return method_name, method_options_name @@ -869,8 +1028,10 @@ def add_role(params): try: client.add_role_to_db_cluster(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + module.fail_json_aws( + e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}" + ) + wait_for_cluster_status(client, module, params["DBClusterIdentifier"], "cluster_available") def backtrack_cluster(params): @@ -879,7 +1040,7 @@ def backtrack_cluster(params): client.backtrack_db_cluster(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg=f"Unable to backtrack cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + wait_for_cluster_status(client, module, params["DBClusterIdentifier"], "cluster_available") def get_cluster(db_cluster_id): @@ -891,47 +1052,51 @@ def get_cluster(db_cluster_id): def changing_cluster_options(modify_params, current_cluster): changing_params = {} - apply_immediately = modify_params.pop('ApplyImmediately') - db_cluster_id = modify_params.pop('DBClusterIdentifier') + apply_immediately = modify_params.pop("ApplyImmediately") + db_cluster_id = modify_params.pop("DBClusterIdentifier") - enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None) + enable_cloudwatch_logs_export = modify_params.pop("EnableCloudwatchLogsExports", None) if enable_cloudwatch_logs_export is not None: - desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []} + desired_cloudwatch_logs_configuration = {"EnableLogTypes": [], "DisableLogTypes": []} provided_cloudwatch_logs = set(enable_cloudwatch_logs_export) - current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports']) - - desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export)) - if module.params['purge_cloudwatch_logs_exports']: - desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs)) - changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration - - password = modify_params.pop('MasterUserPassword', None) + current_cloudwatch_logs_export = set(current_cluster["EnabledCloudwatchLogsExports"]) + + desired_cloudwatch_logs_configuration["EnableLogTypes"] = list( + provided_cloudwatch_logs.difference(current_cloudwatch_logs_export) + ) + if module.params["purge_cloudwatch_logs_exports"]: + desired_cloudwatch_logs_configuration["DisableLogTypes"] = list( + current_cloudwatch_logs_export.difference(provided_cloudwatch_logs) + ) + changing_params["CloudwatchLogsExportConfiguration"] = 
desired_cloudwatch_logs_configuration + + password = modify_params.pop("MasterUserPassword", None) if password: - changing_params['MasterUserPassword'] = password + changing_params["MasterUserPassword"] = password - new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None) - if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']: - changing_params['NewDBClusterIdentifier'] = new_cluster_id + new_cluster_id = modify_params.pop("NewDBClusterIdentifier", None) + if new_cluster_id and new_cluster_id != current_cluster["DBClusterIdentifier"]: + changing_params["NewDBClusterIdentifier"] = new_cluster_id - option_group = modify_params.pop('OptionGroupName', None) - if ( - option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']] - ): - changing_params['OptionGroupName'] = option_group + option_group = modify_params.pop("OptionGroupName", None) + if option_group and option_group not in [ + g["DBClusterOptionGroupName"] for g in current_cluster["DBClusterOptionGroupMemberships"] + ]: + changing_params["OptionGroupName"] = option_group - vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None) + vpc_sgs = modify_params.pop("VpcSecurityGroupIds", None) if vpc_sgs: desired_vpc_sgs = [] provided_vpc_sgs = set(vpc_sgs) - current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']]) - if module.params['purge_security_groups']: + current_vpc_sgs = set([sg["VpcSecurityGroupId"] for sg in current_cluster["VpcSecurityGroups"]]) + if module.params["purge_security_groups"]: desired_vpc_sgs = vpc_sgs else: if provided_vpc_sgs - current_vpc_sgs: desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs) if desired_vpc_sgs: - changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs + changing_params["VpcSecurityGroupIds"] = desired_vpc_sgs desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None) if desired_db_cluster_parameter_group: @@ -943,9 +1108,19 @@ def changing_cluster_options(modify_params, current_cluster): changing_params[param] = modify_params[param] if changing_params: - changing_params['DBClusterIdentifier'] = db_cluster_id + changing_params["DBClusterIdentifier"] = db_cluster_id if apply_immediately is not None: - changing_params['ApplyImmediately'] = apply_immediately + changing_params["ApplyImmediately"] = apply_immediately + + if module.params["state"] == "started": + if current_cluster["Engine"] in ["mysql", "postgres"]: + module.fail_json("Only aurora clusters can use the state started") + changing_params["DBClusterIdentifier"] = db_cluster_id + + if module.params["state"] == "stopped": + if current_cluster["Engine"] in ["mysql", "postgres"]: + module.fail_json("Only aurora clusters can use the state stopped") + changing_params["DBClusterIdentifier"] = db_cluster_id return changing_params @@ -954,8 +1129,9 @@ def ensure_present(cluster, parameters, method_name, method_options_name): changed = False if not cluster: - if parameters.get('Tags') is not None: - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if parameters.get("Tags") is not None: + parameters["Tags"] = ansible_dict_to_boto3_tag_list(parameters["Tags"]) + call_method(client, module, method_name, eval(method_options_name)(parameters)) changed = True else: @@ -963,65 +1139,104 @@ def ensure_present(cluster, parameters, method_name, method_options_name): backtrack_cluster(client, module, get_backtrack_options(parameters)) changed = True 
else: - modifiable_options = eval(method_options_name)(parameters, - force_update_password=module.params['force_update_password']) + modifiable_options = eval(method_options_name)( + parameters, force_update_password=module.params["force_update_password"] + ) modify_options = changing_cluster_options(modifiable_options, cluster) if modify_options: call_method(client, module, method_name, modify_options) changed = True - if module.params['tags'] is not None: - existing_tags = get_tags(client, module, cluster['DBClusterArn']) - changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'], - module.params['purge_tags']) + if module.params["tags"] is not None: + existing_tags = get_tags(client, module, cluster["DBClusterArn"]) + changed |= ensure_tags( + client, + module, + cluster["DBClusterArn"], + existing_tags, + module.params["tags"], + module.params["purge_tags"], + ) add_role_params = get_add_role_options(parameters, cluster) if add_role_params: add_role(client, module, add_role_params) changed = True - if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'): - call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']}) + if module.params["promote"] and cluster.get("ReplicationSourceIdentifier"): + call_method( + client, + module, + "promote_read_replica_db_cluster", + parameters={"DBClusterIdentifier": module.params["db_cluster_identifier"]}, + ) changed = True return changed +def handle_remove_from_global_db(module, cluster): + global_cluster_id = module.params.get("global_cluster_identifier") + db_cluster_id = module.params.get("db_cluster_identifier") + db_cluster_arn = cluster["DBClusterArn"] + + if module.check_mode: + return True + + try: + client.remove_from_global_cluster(DbClusterIdentifier=db_cluster_arn, GlobalClusterIdentifier=global_cluster_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Failed to remove cluster {db_cluster_id} from global DB cluster {global_cluster_id}." 
+ ) + + # for replica cluster - wait for cluster to change status from 'available' to 'promoting' + # only replica/secondary clusters have "GlobalWriteForwardingStatus" field + if "GlobalWriteForwardingStatus" in cluster: + wait_for_cluster_status(client, module, db_cluster_id, "db_cluster_promoting") + + # if wait=true, wait for db cluster remove from global db operation to complete + if module.params.get("wait"): + wait_for_cluster_status(client, module, db_cluster_id, "cluster_available") + + return True + + def main(): global module global client arg_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']), - force_update_password=dict(type='bool', default=False), - promote=dict(type='bool', default=False), - purge_cloudwatch_logs_exports=dict(type='bool', default=True), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - purge_security_groups=dict(type='bool', default=True), + state=dict(choices=["present", "absent", "started", "stopped"], default="present"), + creation_source=dict(type="str", choices=["snapshot", "s3", "cluster"]), + force_update_password=dict(type="bool", default=False), + promote=dict(type="bool", default=False), + purge_cloudwatch_logs_exports=dict(type="bool", default=True), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + purge_security_groups=dict(type="bool", default=True), ) parameter_options = dict( - apply_immediately=dict(type='bool', default=False), - availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']), + apply_immediately=dict(type="bool", default=False), + availability_zones=dict(type="list", elements="str", aliases=["zones", "az"]), backtrack_to=dict(), - backtrack_window=dict(type='int'), - backup_retention_period=dict(type='int', default=1), + backtrack_window=dict(type="int"), + backup_retention_period=dict(type="int", default=1), character_set_name=dict(), - database_name=dict(aliases=['db_name']), - db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']), + database_name=dict(aliases=["db_name"]), + db_cluster_identifier=dict(required=True, aliases=["cluster_id", "id", "cluster_name"]), db_cluster_parameter_group_name=dict(), db_subnet_group_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', elements='str'), - deletion_protection=dict(type='bool'), + enable_cloudwatch_logs_exports=dict(type="list", elements="str"), + deletion_protection=dict(type="bool"), global_cluster_identifier=dict(), - enable_http_endpoint=dict(type='bool'), - copy_tags_to_snapshot=dict(type='bool'), + enable_http_endpoint=dict(type="bool"), + copy_tags_to_snapshot=dict(type="bool"), domain=dict(), domain_iam_role_name=dict(), - enable_global_write_forwarding=dict(type='bool'), + enable_global_write_forwarding=dict(type="bool"), db_cluster_instance_class=dict(type="str"), - enable_iam_database_authentication=dict(type='bool'), + enable_iam_database_authentication=dict(type="bool"), engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"]), engine_mode=dict(choices=["provisioned", "serverless", "parallelquery", "global", "multimaster"]), engine_version=dict(), @@ -1029,47 +1244,64 @@ def main(): storage_type=dict(type="str", choices=["io1"]), iops=dict(type="int"), final_snapshot_identifier=dict(), - force_backtrack=dict(type='bool'), + force_backtrack=dict(type="bool"), kms_key_id=dict(), - 
master_user_password=dict(aliases=['password'], no_log=True), - master_username=dict(aliases=['username']), - new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']), + master_user_password=dict(aliases=["password"], no_log=True), + master_username=dict(aliases=["username"]), + new_db_cluster_identifier=dict(aliases=["new_cluster_id", "new_id", "new_cluster_name"]), option_group_name=dict(), - port=dict(type='int'), - preferred_backup_window=dict(aliases=['backup_window']), - preferred_maintenance_window=dict(aliases=['maintenance_window']), - replication_source_identifier=dict(aliases=['replication_src_id']), + port=dict(type="int"), + preferred_backup_window=dict(aliases=["backup_window"]), + preferred_maintenance_window=dict(aliases=["maintenance_window"]), + remove_from_global_db=dict(type="bool"), + replication_source_identifier=dict(aliases=["replication_src_id"]), restore_to_time=dict(), - restore_type=dict(choices=['full-copy', 'copy-on-write']), + restore_type=dict(choices=["full-copy", "copy-on-write"]), role_arn=dict(), s3_bucket_name=dict(), s3_ingestion_role_arn=dict(), s3_prefix=dict(), - skip_final_snapshot=dict(type='bool', default=False), + serverless_v2_scaling_configuration=dict( + type="dict", + options=dict( + min_capacity=dict(type="float"), + max_capacity=dict(type="float"), + ), + ), + skip_final_snapshot=dict(type="bool", default=False), snapshot_identifier=dict(), source_db_cluster_identifier=dict(), - source_engine=dict(choices=['mysql']), + source_engine=dict(choices=["mysql"]), source_engine_version=dict(), source_region=dict(), - storage_encrypted=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), - use_latest_restorable_time=dict(type='bool'), - vpc_security_group_ids=dict(type='list', elements='str'), + storage_encrypted=dict(type="bool"), + tags=dict(type="dict", aliases=["resource_tags"]), + use_earliest_time_on_point_in_time_unavailable=dict(type="bool"), + use_latest_restorable_time=dict(type="bool"), + vpc_security_group_ids=dict(type="list", elements="str"), ) arg_spec.update(parameter_options) + required_by_s3_creation_source = [ + "s3_bucket_name", + "engine", + "master_username", + "master_user_password", + "source_engine", + "source_engine_version", + "s3_ingestion_role_arn", + ] + module = AnsibleAWSModule( argument_spec=arg_spec, required_if=[ - ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), - ('creation_source', 's3', ( - 's3_bucket_name', 'engine', 'master_username', 'master_user_password', - 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ["creation_source", "snapshot", ["snapshot_identifier", "engine"]], + ["creation_source", "s3", required_by_s3_creation_source], + ["remove_from_global_db", True, ["global_cluster_identifier", "db_cluster_identifier"]], ], mutually_exclusive=[ - ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'), - ('use_latest_restorable_time', 'restore_to_time'), + ["s3_bucket_name", "source_db_cluster_identifier", "snapshot_identifier"], + ["use_latest_restorable_time", "restore_to_time"], ], supports_check_mode=True, ) @@ -1077,12 +1309,11 @@ def main(): retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") if module.params.get("engine") and module.params["engine"] in ("mysql", "postgres"): - module.require_botocore_at_least("1.23.44", reason="to use mysql and postgres engines") if module.params["state"] == "present": if not ( module.params.get("allocated_storage") @@ -1090,54 +1321,71 @@ def main(): and module.params.get("db_cluster_instance_class") ): module.fail_json( - f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class msut be specified" + f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class must be specified" ) else: # Fall to default value if not module.params.get("storage_type"): module.params["storage_type"] = "io1" - module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower() - cluster = get_cluster(module.params['db_cluster_identifier']) + module.params["db_cluster_identifier"] = module.params["db_cluster_identifier"].lower() + cluster = get_cluster(module.params["db_cluster_identifier"]) - if module.params['new_db_cluster_identifier']: - module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower() + if module.params["new_db_cluster_identifier"]: + module.params["new_db_cluster_identifier"] = module.params["new_db_cluster_identifier"].lower() - if get_cluster(module.params['new_db_cluster_identifier']): - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists") + if get_cluster(module.params["new_db_cluster_identifier"]): + module.fail_json( + f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists" + ) if not cluster: - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist") + module.fail_json( + f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist" + ) if ( - module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and - module.params['final_snapshot_identifier'] is None + module.params["state"] == "absent" + and module.params["skip_final_snapshot"] is False + and module.params["final_snapshot_identifier"] is None ): - module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier') - - parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + module.fail_json( + msg="skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier" + ) changed = False + + parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) method_name, method_options_name = get_rds_method_attribute_name(cluster) if method_name: - if method_name == 'delete_db_cluster': + if method_name == "delete_db_cluster": + if cluster and module.params.get("remove_from_global_db"): + if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]: + changed = handle_remove_from_global_db(module, cluster) + call_method(client, module, method_name, eval(method_options_name)(parameters)) changed = True else: changed |= ensure_present(cluster, parameters, method_name, method_options_name) - if not module.check_mode and module.params['new_db_cluster_identifier'] and 
module.params['apply_immediately']: - cluster_id = module.params['new_db_cluster_identifier'] + if not module.check_mode and module.params["new_db_cluster_identifier"] and module.params["apply_immediately"]: + cluster_id = module.params["new_db_cluster_identifier"] else: - cluster_id = module.params['db_cluster_identifier'] + cluster_id = module.params["db_cluster_identifier"] + + if cluster_id and get_cluster(cluster_id) and module.params.get("remove_from_global_db"): + if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]: + if changed: + wait_for_cluster_status(client, module, cluster_id, "cluster_available") + changed |= handle_remove_from_global_db(module, cluster) result = camel_dict_to_snake_dict(get_cluster(cluster_id)) if result: - result['tags'] = get_tags(client, module, result['db_cluster_arn']) + result["tags"] = get_tags(client, module, result["db_cluster_arn"]) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py index 3135a4ce9..08789af4c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # Copyright (c) 2022 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: rds_cluster_info version_added: 5.0.0 short_description: Obtain information about one or more RDS clusters @@ -32,13 +30,12 @@ options: author: - Alina Buzachis (@alinabuzachis) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Get info of all existing DB clusters amazon.aws.rds_cluster_info: register: _result_cluster_info @@ -52,9 +49,9 @@ EXAMPLES = r''' amazon.aws.rds_cluster_info: engine: "aurora" register: _result_cluster_info -''' +""" -RETURN = r''' +RETURN = r""" clusters: description: List of RDS clusters. returned: always @@ -240,7 +237,7 @@ clusters: description: Security group of the cluster. 
type: str sample: sg-12345678 -''' +""" try: @@ -248,32 +245,33 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff(retries=10) def _describe_db_clusters(client, **params): try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'] - except is_boto3_error_code('DBClusterNotFoundFault'): + paginator = client.get_paginator("describe_db_clusters") + return paginator.paginate(**params).build_full_result()["DBClusters"] + except is_boto3_error_code("DBClusterNotFoundFault"): return [] def cluster_info(client, module): - cluster_id = module.params.get('db_cluster_identifier') - filters = module.params.get('filters') + cluster_id = module.params.get("db_cluster_identifier") + filters = module.params.get("filters") params = dict() if cluster_id: - params['DBClusterIdentifier'] = cluster_id + params["DBClusterIdentifier"] = cluster_id if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: result = _describe_db_clusters(client, **params) @@ -281,15 +279,15 @@ def cluster_info(client, module): module.fail_json_aws(e, "Couldn't get RDS cluster information.") for cluster in result: - cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn']) + cluster["Tags"] = get_tags(client, module, cluster["DBClusterArn"]) - return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result]) + return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=["Tags"]) for cluster in result]) def main(): argument_spec = dict( - db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']), - filters=dict(type='dict'), + db_cluster_identifier=dict(aliases=["cluster_id", "id", "cluster_name"]), + filters=dict(type="dict"), ) module = AnsibleAWSModule( @@ -298,12 +296,12 @@ def main(): ) try: - client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") module.exit_json(**cluster_info(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py 
b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py index ff712c438..2f0ce49ec 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2014 Ansible Project # Copyright (c) 2021 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_cluster_snapshot version_added: 5.0.0 @@ -73,13 +71,13 @@ notes: author: - Alina Buzachis (@alinabuzachis) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a DB cluster snapshot amazon.aws.rds_cluster_snapshot: db_cluster_identifier: "{{ cluster_id }}" @@ -97,9 +95,9 @@ EXAMPLES = r''' source_id: "{{ snapshot.db_snapshot_arn }}" source_region: us-east-2 copy_tags: true -''' +""" -RETURN = r''' +RETURN = r""" availability_zone: description: Availability zone of the database from which the snapshot was created. returned: always @@ -214,47 +212,53 @@ tags: returned: always type: complex contains: {} -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list def get_snapshot(snapshot_id): try: - snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0] + snapshot = 
client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)[ + "DBClusterSnapshots" + ][0] snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"]) except is_boto3_error_code("DBClusterSnapshotNotFound"): return {} except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot def get_parameters(parameters, method_name): - if method_name == 'copy_db_cluster_snapshot': - parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier'] + if method_name == "copy_db_cluster_snapshot": + parameters["TargetDBClusterSnapshotIdentifier"] = module.params["db_cluster_snapshot_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + attribute_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {attribute_description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) @@ -277,20 +281,20 @@ def ensure_snapshot_absent(): def copy_snapshot(params): changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot_id = module.params.get("db_cluster_snapshot_identifier") snapshot = get_snapshot(snapshot_id) if not snapshot: - method_params = get_parameters(params, 'copy_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params) + method_params = get_parameters(params, "copy_db_cluster_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _result, changed = call_method(client, module, "copy_db_cluster_snapshot", method_params) return changed def ensure_snapshot_present(params): - source_id = module.params.get('source_db_cluster_snapshot_identifier') + source_id = module.params.get("source_db_cluster_snapshot_identifier") snapshot_name = module.params.get("db_cluster_snapshot_identifier") changed = False @@ -309,14 +313,14 @@ def ensure_snapshot_present(params): changed |= modify_snapshot() snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"])) def create_snapshot(params): - method_params = get_parameters(params, 'create_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params) + method_params = 
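# A minimal sketch of the validate-then-trim idea in get_parameters() above,
# assuming `required` and `allowed` were already looked up via
# get_boto3_client_method_parameters(); trim_method_parameters is hypothetical.
def trim_method_parameters(parameters, required, allowed):
    missing = [k for k in required if parameters.get(k) is None]
    if missing:
        raise ValueError(f"Missing required parameters: {missing}")
    # Drop anything the boto3 method does not accept, plus unset values.
    return {k: v for k, v in parameters.items() if k in allowed and v is not None}

# copy_db_cluster_snapshot requires both snapshot identifiers; the Tags=None
# entry is silently dropped while both identifiers are kept.
trimmed = trim_method_parameters(
    {"SourceDBClusterSnapshotIdentifier": "src", "TargetDBClusterSnapshotIdentifier": "dst", "Tags": None},
    required=["SourceDBClusterSnapshotIdentifier", "TargetDBClusterSnapshotIdentifier"],
    allowed=["SourceDBClusterSnapshotIdentifier", "TargetDBClusterSnapshotIdentifier", "Tags"],
)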
get_parameters(params, "create_db_cluster_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _snapshot, changed = call_method(client, module, "create_db_cluster_snapshot", method_params) return changed @@ -324,11 +328,18 @@ def create_snapshot(params): def modify_snapshot(): # TODO - add other modifications aside from purely tags changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot_id = module.params.get("db_cluster_snapshot_identifier") snapshot = get_snapshot(snapshot_id) - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + if module.params.get("tags"): + changed |= ensure_tags( + client, + module, + snapshot["DBClusterSnapshotArn"], + snapshot["Tags"], + module.params["tags"], + module.params["purge_tags"], + ) return changed @@ -338,16 +349,16 @@ def main(): global module argument_spec = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True), - db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']), - source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), + state=dict(type="str", choices=["present", "absent"], default="present"), + db_cluster_snapshot_identifier=dict(type="str", aliases=["id", "snapshot_id", "snapshot_name"], required=True), + db_cluster_identifier=dict(type="str", aliases=["cluster_id", "cluster_name"]), + source_db_cluster_snapshot_identifier=dict(type="str", aliases=["source_id", "source_snapshot_id"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_tags=dict(type="bool", default=False), + source_region=dict(type="str"), ) module = AnsibleAWSModule( @@ -357,7 +368,7 @@ def main(): retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to connect to AWS.") @@ -370,5 +381,5 @@ def main(): ensure_snapshot_present(params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py new file mode 100644 index 000000000..20200155d --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2023 Ansible Project +# Copyright (c) 2023 Gomathi Selvi Srinivasan (@GomathiselviS) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +module: rds_global_cluster_info +version_added: 7.0.0 +short_description: Obtain information about Aurora global 
database clusters
+description:
+  - Obtain information about Aurora global database clusters.
+options:
+  global_cluster_identifier:
+    description:
+      - The user-supplied Global DB cluster identifier.
+      - If this parameter is specified, information from only the specific DB cluster is returned.
+      - This parameter is not case-sensitive.
+      - If supplied, must match an existing DBClusterIdentifier.
+    type: str
+
+author:
+  - Gomathi Selvi Srinivasan (@GomathiselviS)
+notes:
+  - While developing this module, describe_global_cluster CLI did not yield any tag information.
+  - Consequently, the "tags" parameter is not included in this module.
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Get info of all existing DB clusters
+  amazon.aws.rds_global_cluster_info:
+  register: _result_cluster_info
+
+- name: Get info on a specific DB cluster
+  amazon.aws.rds_global_cluster_info:
+    global_cluster_identifier: "{{ cluster_id }}"
+  register: _result_global_cluster_info
+"""
+
+RETURN = r"""
+global_clusters:
+  description: List of global clusters.
+  returned: always
+  type: list
+  elements: dict
+  contains:
+    global_cluster_identifier:
+      description: User-supplied global database cluster identifier.
+      type: str
+      sample: "ansible-test-global-cluster"
+    global_cluster_resource_id:
+      description:
+        - The Amazon Web Services Region-unique, immutable identifier for the global database cluster.
+      type: str
+      sample: cluster-xxx
+    global_cluster_arn:
+      description:
+        - The Amazon Resource Name (ARN) for the global database cluster.
+      type: str
+      sample: "arn:aws:rds::xxx:global-cluster:ansible-test-global-cluster"
+    status:
+      description: The status of the DB cluster.
+      type: str
+      sample: available
+    engine:
+      description: The database engine of the DB cluster.
+      type: str
+      sample: aurora-postgresql
+    engine_version:
+      description: The database engine version.
+      type: str
+      sample: 14.8
+    storage_encrypted:
+      description: Whether the DB cluster is storage encrypted.
+      type: bool
+      sample: false
+    deletion_protection:
+      description:
+        - Indicates if the DB cluster has deletion protection enabled.
+          The database can't be deleted when deletion protection is enabled.
+      type: bool
+      sample: false
+    global_cluster_members:
+      description:
+        - The list of primary and secondary clusters within the global database
+          cluster.
+      type: list
+      elements: dict
+      contains:
+        db_cluster_arn:
+          description: The Amazon Resource Name (ARN) for each Aurora DB cluster in the global cluster.
+          type: str
+          sample: arn:aws:rds:us-east-1:123456789012:cluster:ansible-test-primary
+        readers:
+          description: The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the global cluster.
+          type: list
+          elements: str
+          sample: arn:aws:rds:us-east-2:123456789012:cluster:ansible-test-secondary
+        is_writer:
+          description:
+            - Indicates whether the Aurora DB cluster is the primary cluster for the global cluster with which it is associated.
+          type: bool
+          sample: false
+        global_write_forwarding_status:
+          description: The status of write forwarding for a secondary cluster in the global cluster.
+          type: str
+          sample: disabled
+    failover_state:
+      description:
+        - A data object containing all properties for the current state of an in-process or
+          pending switchover or failover process for this global cluster (Aurora global database).
+ - This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster. + type: dict + contains: + status: + description: + - The current status of the global cluster. + type: str + sample: "pending" + from_db_cluster_arn: + description: The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being demoted, and which is associated with this state. + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:ansible-test-primary + to_db_cluster_arn: + description: The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being promoted, and which is associated with this state. + type: str + sample: arn:aws:rds:us-east-2:123456789012:cluster:ansible-test-secondary + is_data_loss_allowed: + description: + - Indicates whether the operation is a global switchover or a global failover. + - If data loss is allowed, then the operation is a global failover. Otherwise, it is a switchover. + type: bool + sample: false +""" + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_global_clusters(client, **params): + try: + paginator = client.get_paginator("describe_global_clusters") + return paginator.paginate(**params).build_full_result()["GlobalClusters"] + except is_boto3_error_code("GlobalClusterNotFoundFault"): + return [] + + +def cluster_info(client, module): + global_cluster_id = module.params.get("global_cluster_identifier") + + params = dict() + if global_cluster_id: + params["GlobalClusterIdentifier"] = global_cluster_id + + try: + result = _describe_global_clusters(client, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get Global cluster information.") + + return dict( + changed=False, global_clusters=[camel_dict_to_snake_dict(cluster, ignore_list=["Tags"]) for cluster in result] + ) + + +def main(): + argument_spec = dict( + global_cluster_identifier=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") + + module.exit_json(**cluster_info(client, module)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py index f1eccea3b..4451d7638 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_instance version_added: 5.0.0 @@ -15,8 +13,8 @@ 
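# How ignore_list behaves in the camel_dict_to_snake_dict() calls above: the
# "Tags" key itself is snake_cased, but the user-supplied keys nested under it
# keep their original case. Runnable wherever ansible-core is installed.
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

raw = {"GlobalClusterIdentifier": "demo", "Tags": {"CostCenter": "42"}}
print(camel_dict_to_snake_dict(raw, ignore_list=["Tags"]))
# {'global_cluster_identifier': 'demo', 'tags': {'CostCenter': '42'}}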
description: - Create, modify, and delete RDS instances. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 author: @@ -40,17 +38,17 @@ options: type: str force_update_password: description: - - Set to C(True) to update your instance password with I(master_user_password). Since comparing passwords to determine - if it needs to be updated is not possible this is set to False by default to allow idempotence. + - Set to C(true) to update your instance password with I(master_user_password). Since comparing passwords to determine + if it needs to be updated is not possible this is set to c(false) by default to allow idempotence. type: bool - default: False + default: false purge_cloudwatch_logs_exports: - description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. + description: Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. type: bool - default: True + default: true read_replica: description: - - Set to C(False) to promote a read replica instance or true to create one. When creating a read replica C(creation_source) should + - Set to C(false) to promote a read replica instance or C(true) to create one. When creating a read replica C(creation_source) should be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. type: bool wait: @@ -59,9 +57,9 @@ options: Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). - If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications. + If setting this to C(false) task retries and delays may make your playbook execution better handle timeouts for major modifications. type: bool - default: True + default: true # Options that have a corresponding boto3 parameter allocated_storage: @@ -75,10 +73,10 @@ options: apply_immediately: description: - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password) - should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes are applied during the next maintenance window. type: bool - default: False + default: false auto_minor_version_upgrade: description: - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window. @@ -108,7 +106,7 @@ options: copy_tags_to_snapshot: description: - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating - a DB instance the RDS API defaults this to false if unspecified. + a DB instance the RDS API defaults this to C(false) if unspecified. 
type: bool db_cluster_identifier: description: @@ -132,7 +130,7 @@ options: aliases: - instance_id - id - required: True + required: true type: str db_name: description: @@ -187,7 +185,7 @@ options: enable_iam_database_authentication: description: - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. - If this option is omitted when creating the instance, Amazon RDS sets this to False. + If this option is omitted when creating the instance, Amazon RDS sets this to C(false). type: bool enable_performance_insights: description: @@ -201,18 +199,18 @@ options: type: str engine_version: description: - - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a , 5.7.12. - Aurora PostgreSQL example, 9.6.3 + - The version number of the database engine to use. For Aurora MySQL that could be C(5.6.10a) , C(5.7.12). + Aurora PostgreSQL example, C(9.6.3) type: str final_db_snapshot_identifier: description: - - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false. + - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is C(false). aliases: - final_snapshot_identifier type: str force_failover: description: - - Set to true to conduct the reboot through a MultiAZ failover. + - Set to C(true) to conduct the reboot through a MultiAZ failover. type: bool iam_roles: description: @@ -241,7 +239,7 @@ options: - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KM encryption key. - - If I(storage_encrypted) is true and and this option is not provided, the default encryption key is used. + - If I(storage_encrypted) is C(true) and and this option is not provided, the default encryption key is used. type: str license_model: description: @@ -252,7 +250,7 @@ options: master_user_password: description: - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except "/", """, or "@". To modify the password use I(force_update_password). Use I(apply immediately) to change + except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply_immediately) to change the password immediately, otherwise it is updated during the next maintenance window. aliases: - password @@ -270,7 +268,7 @@ options: monitoring_interval: description: - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting - metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. + metrics, specify C(0). Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. type: int monitoring_role_arn: description: @@ -339,22 +337,22 @@ options: type: int publicly_accessible: description: - - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with - a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal + - Specifies the accessibility options for the DB instance. A value of C(true) specifies an Internet-facing instance with + a publicly resolvable DNS name, which resolves to a public IP address. 
A value of C(false) specifies an internal instance with a DNS name that resolves to a private IP address. type: bool purge_iam_roles: description: - - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance. + - Set to C(true) to remove any IAM roles that aren't specified in the task and are associated with the instance. type: bool - default: False + default: false version_added: 3.3.0 version_added_collection: community.aws restore_time: description: - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. For example, "2009-09-07T23:45:00Z". - - May alternatively set I(use_latest_restore_time=True). + - May alternatively set I(use_latest_restore_time=true). - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided. type: str s3_bucket_name: @@ -373,7 +371,7 @@ options: type: str skip_final_snapshot: description: - - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier) + - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is C(false) I(final_db_snapshot_identifier) must be provided. type: bool default: false @@ -414,7 +412,7 @@ options: - The storage throughput when the I(storage_type) is C(gp3). - When the allocated storage is below 400 GB, the storage throughput will always be 125 mb/s. - When the allocated storage is large than or equal 400 GB, the througput starts at 500 mb/s. - - Requires boto3 >= 1.26.0. + - Requires botocore >= 1.29.0. type: int version_added: 5.2.0 tde_credential_arn: @@ -449,15 +447,15 @@ options: elements: str purge_security_groups: description: - - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance. + - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the instance. - Can be applied to I(vpc_security_group_ids) and I(db_security_groups) type: bool - default: True + default: true version_added: 1.5.0 version_added_collection: community.aws -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create minimal aurora instance in default VPC and default subnet group amazon.aws.rds_instance: @@ -473,7 +471,7 @@ EXAMPLES = r''' id: test-encrypted-db state: present engine: mariadb - storage_encrypted: True + storage_encrypted: true db_instance_class: db.t2.medium username: "{{ username }}" password: "{{ password }}" @@ -483,7 +481,7 @@ EXAMPLES = r''' amazon.aws.rds_instance: id: "{{ instance_id }}" state: absent - skip_final_snapshot: True + skip_final_snapshot: true - name: remove the DB instance with a final snapshot amazon.aws.rds_instance: @@ -502,7 +500,7 @@ EXAMPLES = r''' # Add IAM role to db instance - name: Create IAM policy - community.aws.iam_managed_policy: + amazon.aws.iam_managed_policy: policy_name: "my-policy" policy: "{{ lookup('file','files/policy.json') }}" state: present @@ -553,9 +551,9 @@ EXAMPLES = r''' engine: mariadb state: present register: restored_db -''' +""" -RETURN = r''' +RETURN = r""" allocated_storage: description: The allocated storage size in gigabytes. This is always 1 for aurora database engines. 
returned: always @@ -582,7 +580,9 @@ backup_retention_period: type: int sample: 1 ca_certificate_identifier: - description: The identifier of the CA certificate for the DB instance. + description: + - The identifier of the CA certificate for the DB instance. + - Requires minimum botocore version 1.29.44. returned: always type: str sample: rds-ca-2015 @@ -692,7 +692,7 @@ dbi_resource_id: type: str sample: db-UHV3QRNWX4KB6GALCIGRML6QFA deletion_protection: - description: C(True) if the DB instance has deletion protection enabled, C(False) if not. + description: C(true) if the DB instance has deletion protection enabled, C(False) if not. returned: always type: bool sample: False @@ -801,7 +801,7 @@ pending_modified_values: type: complex contains: {} performance_insights_enabled: - description: True if Performance Insights is enabled for the DB instance, and otherwise false. + description: true if Performance Insights is enabled for the DB instance, and otherwise false. returned: always type: bool sample: false @@ -817,7 +817,7 @@ preferred_maintenance_window: sample: sun:09:31-sun:10:01 publicly_accessible: description: - - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an + - C(True) for an Internet-facing instance with a publicly resolvable DNS name, C(False) to indicate an internal instance with a DNS name that resolves to a private IP address. returned: always type: bool @@ -857,7 +857,7 @@ vpc_security_groups: returned: always type: str sample: sg-12345678 -''' +""" from time import sleep @@ -871,13 +871,10 @@ from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method from ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles @@ -886,150 +883,193 @@ from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_id from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles - - -valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', - 
'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] - -valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb', - 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +valid_engines = [ + "aurora", + "aurora-mysql", + "aurora-postgresql", + "mariadb", + "mysql", + "oracle-ee", + "oracle-ee-cdb", + "oracle-se2", + "oracle-se2-cdb", + "postgres", + "sqlserver-ee", + "sqlserver-se", + "sqlserver-ex", + "sqlserver-web", +] + +valid_engines_iam_roles = [ + "aurora-postgresql", + "oracle-ee", + "oracle-ee-cdb", + "oracle-se2", + "oracle-se2-cdb", + "postgres", + "sqlserver-ee", + "sqlserver-se", + "sqlserver-ex", + "sqlserver-web", +] def get_rds_method_attribute_name(instance, state, creation_source, read_replica): method_name = None - if state == 'absent' or state == 'terminated': - if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']: - method_name = 'delete_db_instance' + if state == "absent" or state == "terminated": + if instance and instance["DBInstanceStatus"] not in ["deleting", "deleted"]: + method_name = "delete_db_instance" else: if instance: - method_name = 'modify_db_instance' + method_name = "modify_db_instance" elif read_replica is True: - method_name = 'create_db_instance_read_replica' - elif creation_source == 'snapshot': - method_name = 'restore_db_instance_from_db_snapshot' - elif creation_source == 's3': - method_name = 'restore_db_instance_from_s3' - elif creation_source == 'instance': - method_name = 'restore_db_instance_to_point_in_time' + method_name = "create_db_instance_read_replica" + elif creation_source == "snapshot": + method_name = "restore_db_instance_from_db_snapshot" + elif creation_source == "s3": + method_name = "restore_db_instance_from_s3" + elif creation_source == "instance": + method_name = "restore_db_instance_to_point_in_time" else: - method_name = 'create_db_instance' + method_name = "create_db_instance" return method_name def get_instance(client, module, db_instance_id): try: - for i in range(3): + for _i in range(3): try: - instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0] - instance['Tags'] = get_tags(client, module, instance['DBInstanceArn']) - if instance.get('ProcessorFeatures'): - instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures']) - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - instance['PendingModifiedValues']['ProcessorFeatures'] = dict( - (feature['Name'], feature['Value']) - for feature in instance['PendingModifiedValues']['ProcessorFeatures'] + instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)["DBInstances"][0] + instance["Tags"] = get_tags(client, module, instance["DBInstanceArn"]) + if instance.get("ProcessorFeatures"): + instance["ProcessorFeatures"] = dict( + (feature["Name"], feature["Value"]) for feature in instance["ProcessorFeatures"] + ) + if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"): + instance["PendingModifiedValues"]["ProcessorFeatures"] = dict( + (feature["Name"], feature["Value"]) + for feature in 
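# The state/source dispatch in get_rds_method_attribute_name() above,
# exercised with a few representative inputs (assumes the function from the
# hunk above is in scope; the instance dicts are illustrative).
assert get_rds_method_attribute_name({}, "present", None, None) == "create_db_instance"
assert get_rds_method_attribute_name({}, "present", "snapshot", None) == "restore_db_instance_from_db_snapshot"
assert get_rds_method_attribute_name({}, "present", None, True) == "create_db_instance_read_replica"
assert get_rds_method_attribute_name({"DBInstanceStatus": "available"}, "absent", None, None) == "delete_db_instance"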
instance["PendingModifiedValues"]["ProcessorFeatures"] ) break - except is_boto3_error_code('DBInstanceNotFound'): + except is_boto3_error_code("DBInstanceNotFound"): sleep(3) else: instance = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe DB instances') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe DB instances") return instance def get_final_snapshot(client, module, snapshot_identifier): try: snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier) - if len(snapshots.get('DBSnapshots', [])) == 1: - return snapshots['DBSnapshots'][0] + if len(snapshots.get("DBSnapshots", [])) == 1: + return snapshots["DBSnapshots"][0] return {} - except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True + except is_boto3_error_code("DBSnapshotNotFound"): # May not be using wait: True return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to retrieve information about the final snapshot") def get_parameters(client, module, parameters, method_name): - if method_name == 'restore_db_instance_to_point_in_time': - parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] + if method_name == "restore_db_instance_to_point_in_time": + parameters["TargetDBInstanceIdentifier"] = module.params["db_instance_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) - if parameters.get('ProcessorFeatures') is not None: - parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()] + if parameters.get("ProcessorFeatures") is not None: + parameters["ProcessorFeatures"] = [ + {"Name": k, "Value": to_text(v)} for k, v in parameters["ProcessorFeatures"].items() + ] # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures) - if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': - parameters.pop('ProcessorFeatures') + if parameters.get("ProcessorFeatures") == [] and not method_name == "modify_db_instance": + parameters.pop("ProcessorFeatures") - if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']: - if parameters.get('Tags'): - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name in 
["create_db_instance", "create_db_instance_read_replica", "restore_db_instance_from_db_snapshot"]: + if parameters.get("Tags"): + parameters["Tags"] = ansible_dict_to_boto3_tag_list(parameters["Tags"]) - if method_name == 'modify_db_instance': + if method_name == "modify_db_instance": parameters = get_options_with_changing_values(client, module, parameters) return parameters def get_options_with_changing_values(client, module, parameters): - instance_id = module.params['db_instance_identifier'] - purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports'] - force_update_password = module.params['force_update_password'] - port = module.params['port'] - apply_immediately = parameters.pop('ApplyImmediately', None) - cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] - purge_security_groups = module.params['purge_security_groups'] - + instance_id = module.params["db_instance_identifier"] + purge_cloudwatch_logs = module.params["purge_cloudwatch_logs_exports"] + force_update_password = module.params["force_update_password"] + port = module.params["port"] + apply_immediately = parameters.pop("ApplyImmediately", None) + cloudwatch_logs_enabled = module.params["enable_cloudwatch_logs_exports"] + purge_security_groups = module.params["purge_security_groups"] + ca_certificate_identifier = module.params["ca_certificate_identifier"] + + if ca_certificate_identifier: + parameters["CACertificateIdentifier"] = ca_certificate_identifier if port: - parameters['DBPortNumber'] = port + parameters["DBPortNumber"] = port if not force_update_password: - parameters.pop('MasterUserPassword', None) + parameters.pop("MasterUserPassword", None) if cloudwatch_logs_enabled: - parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled - if not module.params['storage_type']: - parameters.pop('Iops', None) + parameters["CloudwatchLogsExportConfiguration"] = cloudwatch_logs_enabled + if not module.params["storage_type"]: + parameters.pop("Iops", None) instance = get_instance(client, module, instance_id) - updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups) + updated_parameters = get_changing_options_with_inconsistent_keys( + parameters, instance, purge_cloudwatch_logs, purge_security_groups + ) updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) parameters = updated_parameters - if instance.get('StorageType') == 'io1': + if instance.get("StorageType") == "io1": # Bundle Iops and AllocatedStorage while updating io1 RDS Instance - current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) - current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) - new_iops = module.params.get('iops') - new_allocated_storage = module.params.get('allocated_storage') + current_iops = instance.get("PendingModifiedValues", {}).get("Iops", instance["Iops"]) + current_allocated_storage = instance.get("PendingModifiedValues", {}).get( + "AllocatedStorage", instance["AllocatedStorage"] + ) + new_iops = module.params.get("iops") + new_allocated_storage = module.params.get("allocated_storage") if current_iops != new_iops or current_allocated_storage != new_allocated_storage: - parameters['AllocatedStorage'] = new_allocated_storage - parameters['Iops'] = new_iops - - if instance.get('StorageType') == 'gp3': - if module.boto3_at_least('1.26.0'): - GP3_THROUGHPUT = True - 
current_storage_throughput = instance.get('PendingModifiedValues', {}).get('StorageThroughput', instance['StorageThroughput']) - new_storage_throughput = module.params.get('storage_throughput') or current_storage_throughput - if new_storage_throughput != current_storage_throughput: - parameters['StorageThroughput'] = new_storage_throughput - else: - GP3_THROUGHPUT = False - module.warn('gp3 volumes require boto3 >= 1.26.0. storage_throughput will be ignored.') - - current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) + parameters["AllocatedStorage"] = new_allocated_storage + parameters["Iops"] = new_iops + + if instance.get("StorageType") == "gp3": + GP3_THROUGHPUT = True + current_storage_throughput = instance.get("PendingModifiedValues", {}).get( + "StorageThroughput", instance["StorageThroughput"] + ) + new_storage_throughput = module.params.get("storage_throughput") or current_storage_throughput + if new_storage_throughput != current_storage_throughput: + parameters["StorageThroughput"] = new_storage_throughput + + current_iops = instance.get("PendingModifiedValues", {}).get("Iops", instance["Iops"]) # when you just change from gp2 to gp3, you may not add the iops parameter - new_iops = module.params.get('iops') or current_iops + new_iops = module.params.get("iops") or current_iops - new_allocated_storage = module.params.get('allocated_storage') - current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) + new_allocated_storage = module.params.get("allocated_storage") + current_allocated_storage = instance.get("PendingModifiedValues", {}).get( + "AllocatedStorage", instance["AllocatedStorage"] + ) if new_allocated_storage: if current_allocated_storage != new_allocated_storage: @@ -1043,7 +1083,10 @@ def get_options_with_changing_values(client, module, parameters): if new_storage_throughput < 500 and GP3_THROUGHPUT: module.fail_json( - msg="Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB." + msg=( + "Storage Throughput must be at least 500 when the allocated storage is larger than or equal" + " to 400 GB." 
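# The gp3 validation above, reduced to its arithmetic: below 400 GB RDS pins
# throughput at 125 MB/s, while at 400 GB and above a custom throughput must
# be at least 500 MB/s. Thresholds mirror the module code; validate_gp3 is a
# hypothetical local helper.
def validate_gp3(allocated_storage_gb, storage_throughput):
    if allocated_storage_gb >= 400 and storage_throughput < 500:
        raise ValueError(
            "Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB."
        )

validate_gp3(500, 500)  # ok
# validate_gp3(500, 125) would raise: custom throughput too low at >= 400 GB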
+ ) ) if current_iops != new_iops: @@ -1051,50 +1094,67 @@ def get_options_with_changing_values(client, module, parameters): # must be always specified when changing iops parameters["AllocatedStorage"] = new_allocated_storage - if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): - if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: - parameters.pop('NewDBInstanceIdentifier') + if parameters.get("NewDBInstanceIdentifier") and instance.get("PendingModifiedValues", {}).get( + "DBInstanceIdentifier" + ): + if ( + parameters["NewDBInstanceIdentifier"] == instance["PendingModifiedValues"]["DBInstanceIdentifier"] + and not apply_immediately + ): + parameters.pop("NewDBInstanceIdentifier") if parameters: - parameters['DBInstanceIdentifier'] = instance_id + parameters["DBInstanceIdentifier"] = instance_id if apply_immediately is not None: - parameters['ApplyImmediately'] = apply_immediately + parameters["ApplyImmediately"] = apply_immediately return parameters def get_current_attributes_with_inconsistent_keys(instance): options = {} - if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []): - current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable'] - current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable'] - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled} + if instance.get("PendingModifiedValues", {}).get("PendingCloudwatchLogsExports", {}).get("LogTypesToEnable", []): + current_enabled = instance["PendingModifiedValues"]["PendingCloudwatchLogsExports"]["LogTypesToEnable"] + current_disabled = instance["PendingModifiedValues"]["PendingCloudwatchLogsExports"]["LogTypesToDisable"] + options["CloudwatchLogsExportConfiguration"] = { + "LogTypesToEnable": current_enabled, + "LogTypesToDisable": current_disabled, + } else: - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []} - if instance.get('PendingModifiedValues', {}).get('Port'): - options['DBPortNumber'] = instance['PendingModifiedValues']['Port'] + options["CloudwatchLogsExportConfiguration"] = { + "LogTypesToEnable": instance.get("EnabledCloudwatchLogsExports", []), + "LogTypesToDisable": [], + } + if instance.get("PendingModifiedValues", {}).get("Port"): + options["DBPortNumber"] = instance["PendingModifiedValues"]["Port"] else: - options['DBPortNumber'] = instance['Endpoint']['Port'] - if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'): - options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName'] + options["DBPortNumber"] = instance["Endpoint"]["Port"] + if instance.get("PendingModifiedValues", {}).get("DBSubnetGroupName"): + options["DBSubnetGroupName"] = instance["PendingModifiedValues"]["DBSubnetGroupName"] else: - options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName'] - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures'] + options["DBSubnetGroupName"] = instance["DBSubnetGroup"]["DBSubnetGroupName"] + if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"): + options["ProcessorFeatures"] = 
instance["PendingModifiedValues"]["ProcessorFeatures"] else: - options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {}) - options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']] - options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] - options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] + options["ProcessorFeatures"] = instance.get("ProcessorFeatures", {}) + options["OptionGroupName"] = [g["OptionGroupName"] for g in instance["OptionGroupMemberships"]] + options["DBSecurityGroups"] = [ + sg["DBSecurityGroupName"] for sg in instance["DBSecurityGroups"] if sg["Status"] in ["adding", "active"] + ] + options["VpcSecurityGroupIds"] = [ + sg["VpcSecurityGroupId"] for sg in instance["VpcSecurityGroups"] if sg["Status"] in ["adding", "active"] + ] + options["DBParameterGroupName"] = [ + parameter_group["DBParameterGroupName"] for parameter_group in instance["DBParameterGroups"] + ] + options["EnableIAMDatabaseAuthentication"] = instance["IAMDatabaseAuthenticationEnabled"] # PerformanceInsightsEnabled is not returned on older RDS instances it seems - options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False) - options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] + options["EnablePerformanceInsights"] = instance.get("PerformanceInsightsEnabled", False) + options["NewDBInstanceIdentifier"] = instance["DBInstanceIdentifier"] # Neither of these are returned via describe_db_instances, so if either is specified during a check_mode run, changed=True - options['AllowMajorVersionUpgrade'] = None - options['MasterUserPassword'] = None + options["AllowMajorVersionUpgrade"] = None + options["MasterUserPassword"] = None return options @@ -1112,8 +1172,9 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c if isinstance(current_option, list): if isinstance(desired_option, list): if ( - set(desired_option) < set(current_option) and - option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups + set(desired_option) < set(current_option) + and option in ["DBSecurityGroups", "VpcSecurityGroupIds"] + and purge_security_groups ): changing_params[option] = desired_option elif set(desired_option) <= set(current_option): @@ -1123,25 +1184,27 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c continue # Current option and desired option are the same - continue loop - if option != 'ProcessorFeatures' and current_option == desired_option: + if option != "ProcessorFeatures" and current_option == desired_option: continue - if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'): + if option == "ProcessorFeatures" and current_option == boto3_tag_list_to_ansible_dict( + desired_option, "Name", "Value" + ): continue # Current option and desired option are different - add to changing_params list - if option == 'ProcessorFeatures' and desired_option == []: - changing_params['UseDefaultProcessorFeatures'] = True - elif option == 'CloudwatchLogsExportConfiguration': - current_option = 
set(current_option.get('LogTypesToEnable', [])) + if option == "ProcessorFeatures" and desired_option == []: + changing_params["UseDefaultProcessorFeatures"] = True + elif option == "CloudwatchLogsExportConfiguration": + current_option = set(current_option.get("LogTypesToEnable", [])) desired_option = set(desired_option) - format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} - format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) + format_option = {"EnableLogTypes": [], "DisableLogTypes": []} + format_option["EnableLogTypes"] = list(desired_option.difference(current_option)) if purge_cloudwatch_logs: - format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) - if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: + format_option["DisableLogTypes"] = list(current_option.difference(desired_option)) + if format_option["EnableLogTypes"] or format_option["DisableLogTypes"]: changing_params[option] = format_option - elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',): + elif option in ["DBSecurityGroups", "VpcSecurityGroupIds"]: if purge_security_groups: changing_params[option] = desired_option else: @@ -1156,7 +1219,7 @@ def get_changing_options_with_consistent_keys(modify_params, instance): changing_params = {} for param in modify_params: - current_option = instance.get('PendingModifiedValues', {}).get(param, None) + current_option = instance.get("PendingModifiedValues", {}).get(param, None) if current_option is None: current_option = instance.get(param, None) if modify_params[param] != current_option: @@ -1166,19 +1229,15 @@ def get_changing_options_with_consistent_keys(modify_params, instance): def validate_options(client, module, instance): - state = module.params['state'] - skip_final_snapshot = module.params['skip_final_snapshot'] - snapshot_id = module.params['final_db_snapshot_identifier'] - modified_id = module.params['new_db_instance_identifier'] - engine = module.params['engine'] - tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) - read_replica = module.params['read_replica'] - creation_source = module.params['creation_source'] - source_instance = module.params['source_db_instance_identifier'] - if module.params['source_region'] is not None: - same_region = bool(module.params['source_region'] == module.params['region']) - else: - same_region = True + state = module.params["state"] + skip_final_snapshot = module.params["skip_final_snapshot"] + snapshot_id = module.params["final_db_snapshot_identifier"] + modified_id = module.params["new_db_instance_identifier"] + engine = module.params["engine"] + tde_options = bool(module.params["tde_credential_password"] or module.params["tde_credential_arn"]) + read_replica = module.params["read_replica"] + creation_source = module.params["creation_source"] + source_instance = module.params["source_db_instance_identifier"] if modified_id: modified_instance = get_instance(client, module, modified_id) @@ -1186,17 +1245,26 @@ def validate_options(client, module, instance): modified_instance = {} if modified_id and instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id)) + module.fail_json(msg=f"A new instance ID {modified_id} was provided but it already exists") if modified_id and not instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not 
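# The EnableLogTypes/DisableLogTypes split computed above, as plain set
# arithmetic; purge_cloudwatch_logs gates whether currently enabled types that
# are missing from the task get turned off.
current = {"error", "general"}
desired = {"error", "slowquery"}
purge_cloudwatch_logs = True
enable_log_types = sorted(desired - current)  # ['slowquery']
disable_log_types = sorted(current - desired) if purge_cloudwatch_logs else []  # ['general']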
exist'.format(modified_id)) - if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: - module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier') - if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: - module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') - if read_replica is True and not instance and creation_source not in [None, 'instance']: - module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) + module.fail_json( + msg=f"A new instance ID {modified_id} was provided but the instance to be renamed does not exist" + ) + if state in ("absent", "terminated") and instance and not skip_final_snapshot and snapshot_id is None: + module.fail_json( + msg="skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier" + ) + if engine is not None and not (engine.startswith("mysql") or engine.startswith("oracle")) and tde_options: + module.fail_json(msg="TDE is available for MySQL and Oracle DB instances") + if read_replica is True and not instance and creation_source not in [None, "instance"]: + module.fail_json(msg=f"Cannot create a read replica from {creation_source}. You must use a source DB instance") if read_replica is True and not instance and not source_instance: - module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') + module.fail_json( + msg=( + "read_replica is true and the instance does not exist yet but all of the following are missing:" + " source_db_instance_identifier" + ) + ) def update_instance(client, module, instance, instance_id): @@ -1208,10 +1276,10 @@ def update_instance(client, module, instance, instance_id): # Check tagging/promoting/rebooting/starting/stopping instance changed |= ensure_tags( - client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] + client, module, instance["DBInstanceArn"], instance["Tags"], module.params["tags"], module.params["purge_tags"] ) - changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) - changed |= update_instance_state(client, module, instance, module.params['state']) + changed |= promote_replication_instance(client, module, instance, module.params["read_replica"]) + changed |= update_instance_state(client, module, instance, module.params["state"]) return changed @@ -1221,17 +1289,21 @@ def promote_replication_instance(client, module, instance, read_replica): if read_replica is False: # 'StatusInfos' only exists when the instance is a read replica # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html - if bool(instance.get('StatusInfos')): + if bool(instance.get("StatusInfos")): try: - result, changed = call_method(client, module, method_name='promote_read_replica', - parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) - except is_boto3_error_message('DB Instance is not a read replica'): + _result, changed = call_method( + client, + module, + method_name="promote_read_replica", + parameters={"DBInstanceIdentifier": instance["DBInstanceIdentifier"]}, + ) + except is_boto3_error_message("DB Instance is not a read replica"): pass return changed def ensure_iam_roles(client, module, instance_id): - ''' + 
""" Ensure specified IAM roles are associated with DB instance Parameters: @@ -1241,18 +1313,22 @@ def ensure_iam_roles(client, module, instance_id): Returns: changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not - ''' - instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures']) + """ + instance = camel_dict_to_snake_dict( + get_instance(client, module, instance_id), ignore_list=["Tags", "ProcessorFeatures"] + ) # Ensure engine type supports associating IAM roles - engine = instance.get('engine') + engine = instance.get("engine") if engine not in valid_engines_iam_roles: - module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. Valid engines are {1}'.format(engine, valid_engines_iam_roles)) + module.fail_json( + msg=f"DB engine {engine} is not valid for adding IAM roles. Valid engines are {valid_engines_iam_roles}" + ) changed = False - purge_iam_roles = module.params.get('purge_iam_roles') - target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else [] - existing_roles = instance.get('associated_roles', []) + purge_iam_roles = module.params.get("purge_iam_roles") + target_roles = module.params.get("iam_roles") if module.params.get("iam_roles") else [] + existing_roles = instance.get("associated_roles", []) roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles) if bool(roles_to_add or roles_to_remove): changed = True @@ -1266,87 +1342,90 @@ def ensure_iam_roles(client, module, instance_id): def update_instance_state(client, module, instance, state): changed = False - if state in ['rebooted', 'restarted']: + if state in ["rebooted", "restarted"]: changed |= reboot_running_db_instance(client, module, instance) - if state in ['started', 'running', 'stopped']: + if state in ["started", "running", "stopped"]: changed |= start_or_stop_instance(client, module, instance, state) return changed def reboot_running_db_instance(client, module, instance): - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if instance['DBInstanceStatus'] in ['stopped', 'stopping']: - call_method(client, module, 'start_db_instance', parameters) - if module.params.get('force_failover') is not None: - parameters['ForceFailover'] = module.params['force_failover'] - results, changed = call_method(client, module, 'reboot_db_instance', parameters) + parameters = {"DBInstanceIdentifier": instance["DBInstanceIdentifier"]} + if instance["DBInstanceStatus"] in ["stopped", "stopping"]: + call_method(client, module, "start_db_instance", parameters) + if module.params.get("force_failover") is not None: + parameters["ForceFailover"] = module.params["force_failover"] + _results, changed = call_method(client, module, "reboot_db_instance", parameters) return changed def start_or_stop_instance(client, module, instance, state): changed = False - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']: - if module.params['db_snapshot_identifier']: - parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] - result, changed = call_method(client, module, 'stop_db_instance', parameters) - elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: - result, changed = call_method(client, module, 'start_db_instance', parameters) + parameters = {"DBInstanceIdentifier": 
instance["DBInstanceIdentifier"]} + if state == "stopped" and instance["DBInstanceStatus"] not in ["stopping", "stopped"]: + if module.params["db_snapshot_identifier"]: + parameters["DBSnapshotIdentifier"] = module.params["db_snapshot_identifier"] + _result, changed = call_method(client, module, "stop_db_instance", parameters) + elif state == "started" and instance["DBInstanceStatus"] not in ["available", "starting", "restarting"]: + _result, changed = call_method(client, module, "start_db_instance", parameters) return changed def main(): arg_spec = dict( - state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), - creation_source=dict(choices=['snapshot', 's3', 'instance']), - force_update_password=dict(type='bool', default=False, no_log=False), - purge_cloudwatch_logs_exports=dict(type='bool', default=True), - purge_iam_roles=dict(type='bool', default=False), - purge_tags=dict(type='bool', default=True), - read_replica=dict(type='bool'), - wait=dict(type='bool', default=True), - purge_security_groups=dict(type='bool', default=True), + state=dict( + choices=["present", "absent", "terminated", "running", "started", "stopped", "rebooted", "restarted"], + default="present", + ), + creation_source=dict(choices=["snapshot", "s3", "instance"]), + force_update_password=dict(type="bool", default=False, no_log=False), + purge_cloudwatch_logs_exports=dict(type="bool", default=True), + purge_iam_roles=dict(type="bool", default=False), + purge_tags=dict(type="bool", default=True), + read_replica=dict(type="bool"), + wait=dict(type="bool", default=True), + purge_security_groups=dict(type="bool", default=True), ) parameter_options = dict( - allocated_storage=dict(type='int'), - allow_major_version_upgrade=dict(type='bool'), - apply_immediately=dict(type='bool', default=False), - auto_minor_version_upgrade=dict(type='bool'), - availability_zone=dict(aliases=['az', 'zone']), - backup_retention_period=dict(type='int'), - ca_certificate_identifier=dict(), + allocated_storage=dict(type="int"), + allow_major_version_upgrade=dict(type="bool"), + apply_immediately=dict(type="bool", default=False), + auto_minor_version_upgrade=dict(type="bool"), + availability_zone=dict(aliases=["az", "zone"]), + backup_retention_period=dict(type="int"), + ca_certificate_identifier=dict(type="str"), character_set_name=dict(), - copy_tags_to_snapshot=dict(type='bool'), - db_cluster_identifier=dict(aliases=['cluster_id']), - db_instance_class=dict(aliases=['class', 'instance_type']), - db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), + copy_tags_to_snapshot=dict(type="bool"), + db_cluster_identifier=dict(aliases=["cluster_id"]), + db_instance_class=dict(aliases=["class", "instance_type"]), + db_instance_identifier=dict(required=True, aliases=["instance_id", "id"]), db_name=dict(), db_parameter_group_name=dict(), - db_security_groups=dict(type='list', elements='str'), - db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']), - db_subnet_group_name=dict(aliases=['subnet_group']), - deletion_protection=dict(type='bool'), + db_security_groups=dict(type="list", elements="str"), + db_snapshot_identifier=dict(type="str", aliases=["snapshot_identifier", "snapshot_id"]), + db_subnet_group_name=dict(aliases=["subnet_group"]), + deletion_protection=dict(type="bool"), domain=dict(), domain_iam_role_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], 
elements='str'), - enable_iam_database_authentication=dict(type='bool'), - enable_performance_insights=dict(type='bool'), - engine=dict(type='str', choices=valid_engines), + enable_cloudwatch_logs_exports=dict(type="list", aliases=["cloudwatch_log_exports"], elements="str"), + enable_iam_database_authentication=dict(type="bool"), + enable_performance_insights=dict(type="bool"), + engine=dict(type="str", choices=valid_engines), engine_version=dict(), - final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), - force_failover=dict(type='bool'), - iam_roles=dict(type='list', elements='dict'), - iops=dict(type='int'), + final_db_snapshot_identifier=dict(aliases=["final_snapshot_identifier"]), + force_failover=dict(type="bool"), + iam_roles=dict(type="list", elements="dict"), + iops=dict(type="int"), kms_key_id=dict(), license_model=dict(), - master_user_password=dict(aliases=['password'], no_log=True), - master_username=dict(aliases=['username']), - max_allocated_storage=dict(type='int'), - monitoring_interval=dict(type='int'), + master_user_password=dict(aliases=["password"], no_log=True), + master_username=dict(aliases=["username"]), + max_allocated_storage=dict(type="int"), + monitoring_interval=dict(type="int"), monitoring_role_arn=dict(), - multi_az=dict(type='bool'), - new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']), + multi_az=dict(type="bool"), + new_db_instance_identifier=dict(aliases=["new_instance_id", "new_id"]), option_group_name=dict(), performance_insights_kms_key_id=dict(), performance_insights_retention_period=dict(type="int"), @@ -1354,128 +1433,155 @@ def main(): preferred_backup_window=dict(aliases=["backup_window"]), preferred_maintenance_window=dict(aliases=["maintenance_window"]), processor_features=dict(type="dict"), - promotion_tier=dict(type='int'), + promotion_tier=dict(type="int"), publicly_accessible=dict(type="bool"), restore_time=dict(), s3_bucket_name=dict(), s3_ingestion_role_arn=dict(), s3_prefix=dict(), - skip_final_snapshot=dict(type='bool', default=False), + skip_final_snapshot=dict(type="bool", default=False), source_db_instance_identifier=dict(), - source_engine=dict(choices=['mysql']), + source_engine=dict(choices=["mysql"]), source_engine_version=dict(), source_region=dict(), - storage_encrypted=dict(type='bool'), - storage_type=dict(choices=['standard', 'gp2', 'gp3', 'io1']), - storage_throughput=dict(type='int'), - tags=dict(type='dict', aliases=['resource_tags']), - tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']), - tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), + storage_encrypted=dict(type="bool"), + storage_type=dict(choices=["standard", "gp2", "gp3", "io1"]), + storage_throughput=dict(type="int"), + tags=dict(type="dict", aliases=["resource_tags"]), + tde_credential_arn=dict(aliases=["transparent_data_encryption_arn"]), + tde_credential_password=dict(no_log=True, aliases=["transparent_data_encryption_password"]), timezone=dict(), - use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']), - vpc_security_group_ids=dict(type='list', elements='str') + use_latest_restorable_time=dict(type="bool", aliases=["restore_from_latest"]), + vpc_security_group_ids=dict(type="list", elements="str"), ) arg_spec.update(parameter_options) + required_if_s3_creation_source = [ + "s3_bucket_name", + "engine", + "master_username", + "master_user_password", + "source_engine", + "source_engine_version", + "s3_ingestion_role_arn", + ] + 
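As an aside, the required_if_s3_creation_source list assembled above only takes effect once it is wired into the required_if rules that follow. A minimal, self-contained sketch of how such a rule behaves is below; check_required_if is a hypothetical stand-in for the validation AnsibleModule performs internally, not part of this diff:

def check_required_if(rules, params):
    # Each rule is [trigger_param, trigger_value, [required_params]].
    for key, value, requirements in rules:
        if params.get(key) != value:
            continue  # the rule only fires when the trigger parameter matches
        missing = [name for name in requirements if params.get(name) is None]
        if missing:
            raise ValueError(f"{key} is {value} but all of the following are missing: {', '.join(missing)}")

# Example: restoring from S3 without an ingestion role ARN should fail.
try:
    check_required_if(
        [["creation_source", "s3", ["s3_bucket_name", "s3_ingestion_role_arn"]]],
        {"creation_source": "s3", "s3_bucket_name": "my-bucket", "s3_ingestion_role_arn": None},
    )
except ValueError as e:
    print(e)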
required_if = [ - ('engine', 'aurora', ('db_cluster_identifier',)), - ('engine', 'aurora-mysql', ('db_cluster_identifier',)), - ('engine', 'aurora-postresql', ('db_cluster_identifier',)), - ('storage_type', 'io1', ('iops', 'allocated_storage')), - ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')), - ('creation_source', 's3', ( - 's3_bucket_name', 'engine', 'master_username', 'master_user_password', - 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ["engine", "aurora", ["db_cluster_identifier"]], + ["engine", "aurora-mysql", ["db_cluster_identifier"]], + ["engine", "aurora-postresql", ["db_cluster_identifier"]], + ["storage_type", "io1", ["iops", "allocated_storage"]], + ["creation_source", "snapshot", ["db_snapshot_identifier", "engine"]], + ["creation_source", "s3", required_if_s3_creation_source], ] mutually_exclusive = [ - ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'), - ('use_latest_restorable_time', 'restore_time'), - ('availability_zone', 'multi_az'), + ["s3_bucket_name", "source_db_instance_identifier", "db_snapshot_identifier"], + ["use_latest_restorable_time", "restore_time"], + ["availability_zone", "multi_az"], ] module = AnsibleAWSModule( argument_spec=arg_spec, required_if=required_if, mutually_exclusive=mutually_exclusive, - supports_check_mode=True + supports_check_mode=True, ) + if module.params["ca_certificate_identifier"]: + module.require_botocore_at_least( + "1.29.44", reason="to use 'ca_certificate_identifier' while creating/updating rds instance" + ) + # Sanitize instance identifiers - module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower() - if module.params['new_db_instance_identifier']: - module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower() + module.params["db_instance_identifier"] = module.params["db_instance_identifier"].lower() + if module.params["new_db_instance_identifier"]: + module.params["new_db_instance_identifier"] = module.params["new_db_instance_identifier"].lower() # Sanitize processor features - if module.params['processor_features'] is not None: - module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items()) + if module.params["processor_features"] is not None: + module.params["processor_features"] = dict( + (k, to_text(v)) for k, v in module.params["processor_features"].items() + ) # Ensure dates are in lowercase - if module.params['preferred_maintenance_window']: - module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower() + if module.params["preferred_maintenance_window"]: + module.params["preferred_maintenance_window"] = module.params["preferred_maintenance_window"].lower() # Throw warning regarding case when allow_major_version_upgrade is specified in check_mode # describe_rds_instance never returns this value, so on check_mode, it will always return changed=True # In non-check mode runs, changed will return the correct value, so no need to warn there. # see: amazon.aws.module_util.rds.handle_errors. 
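Before the check-mode warning handled below, note the two sanitization steps above: instance identifiers are lowercased (RDS stores identifiers in lowercase), and processor feature values are coerced to text because the API expects ProcessorFeatures values as strings. A standalone illustration of the latter coercion, with made-up values (str() behaves like to_text() here on Python 3):

# Playbook-supplied integers are converted to strings before the
# parameters reach the RDS API.
processor_features = {"coreCount": 4, "threadsPerCore": 2}
sanitized = {k: str(v) for k, v in processor_features.items()}
assert sanitized == {"coreCount": "4", "threadsPerCore": "2"}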
- if module.params.get('allow_major_version_upgrade') and module.check_mode: - module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.') + if module.params.get("allow_major_version_upgrade") and module.check_mode: + module.warn( + "allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True`" + " on check mode runs." + ) - client = module.client('rds') + client = module.client("rds") changed = False - state = module.params['state'] - instance_id = module.params['db_instance_identifier'] + state = module.params["state"] + instance_id = module.params["db_instance_identifier"] instance = get_instance(client, module, instance_id) validate_options(client, module, instance) - method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica']) + method_name = get_rds_method_attribute_name( + instance, state, module.params["creation_source"], module.params["read_replica"] + ) if method_name: - # Exit on create/delete if check_mode - if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']: - module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) - - raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + if module.check_mode and method_name in ["create_db_instance", "delete_db_instance"]: + module.exit_json( + changed=True, **camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"]) + ) + + raw_parameters = arg_spec_to_rds_params( + dict((k, module.params[k]) for k in module.params if k in parameter_options) + ) parameters_to_modify = get_parameters(client, module, raw_parameters, method_name) if parameters_to_modify: # Exit on check_mode when parameters to modify if module.check_mode: - module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])) - result, changed = call_method(client, module, method_name, parameters_to_modify) + module.exit_json( + changed=True, **camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"]) + ) + _result, changed = call_method(client, module, method_name, parameters_to_modify) instance_id = get_final_identifier(method_name, module) - if state != 'absent': + if state != "absent": # Check tagging/promoting/rebooting/starting/stopping instance if not module.check_mode or instance: changed |= update_instance(client, module, instance, instance_id) # Check IAM roles - if module.params.get('iam_roles') or module.params.get('purge_iam_roles'): + if module.params.get("iam_roles") or module.params.get("purge_iam_roles"): changed |= ensure_iam_roles(client, module, instance_id) if changed: instance = get_instance(client, module, instance_id) - if state != 'absent' and (instance or not module.check_mode): - for attempt_to_wait in range(0, 10): + if state != "absent" and (instance or not module.check_mode): + for _wait_attempt in range(0, 10): instance = get_instance(client, module, instance_id) if instance: break else: sleep(5) - if state == 'absent' and changed and not module.params['skip_final_snapshot']: - instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier'])) + if state == "absent" and changed and not module.params["skip_final_snapshot"]: + instance.update( + 
FinalSnapshot=get_final_snapshot(client, module, module.params["final_db_snapshot_identifier"]) + ) pending_processor_features = None - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures') - instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']) + if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"): + pending_processor_features = instance["PendingModifiedValues"].pop("ProcessorFeatures") + instance = camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"]) if pending_processor_features is not None: - instance['pending_modified_values']['processor_features'] = pending_processor_features + instance["pending_modified_values"]["processor_features"] = pending_processor_features module.exit_json(changed=changed, **instance) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py index 6996b6115..36c6d1b9c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017, 2018 Michael De La Rue # Copyright (c) 2017, 2018 Will Thames # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: rds_instance_info version_added: 5.0.0 @@ -33,13 +31,12 @@ author: - "Will Thames (@willthames)" - "Michael De La Rue (@mikedlr)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Get information about an instance amazon.aws.rds_instance_info: db_instance_identifier: new-database @@ -47,9 +44,9 @@ EXAMPLES = ''' - name: Get all RDS instances amazon.aws.rds_instance_info: -''' +""" -RETURN = ''' +RETURN = r""" instances: description: List of RDS instances returned: always @@ -352,15 +349,15 @@ instances: returned: always type: str sample: sg-abcd1234 -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, - boto3_tag_list_to_ansible_dict, - AWSRetry, - camel_dict_to_snake_dict, - ) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list try: import botocore @@ -370,44 +367,54 @@ except ImportError: @AWSRetry.jittered_backoff() def _describe_db_instances(conn, **params): - paginator = conn.get_paginator('describe_db_instances') + paginator = 
conn.get_paginator("describe_db_instances") try: - results = paginator.paginate(**params).build_full_result()['DBInstances'] - except is_boto3_error_code('DBInstanceNotFound'): + results = paginator.paginate(**params).build_full_result()["DBInstances"] + except is_boto3_error_code("DBInstanceNotFound"): results = [] return results -def instance_info(module, conn): - instance_name = module.params.get('db_instance_identifier') - filters = module.params.get('filters') +class RdsInstanceInfoFailure(Exception): + def __init__(self, original_e, user_message): + self.original_e = original_e + self.user_message = user_message + super().__init__(self) + - params = dict() +def get_instance_tags(conn, arn): + try: + return boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=arn, aws_retry=True)["TagList"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise RdsInstanceInfoFailure(e, f"Couldn't get tags for instance {arn}") + + +def instance_info(conn, instance_name, filters): + params = {} if instance_name: - params['DBInstanceIdentifier'] = instance_name + params["DBInstanceIdentifier"] = instance_name if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: results = _describe_db_instances(conn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get instance information") + raise RdsInstanceInfoFailure(e, "Couldn't get instance information") for instance in results: - try: - instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], - aws_retry=True)['TagList']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier']) + instance["Tags"] = get_instance_tags(conn, arn=instance["DBInstanceArn"]) - return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results]) + return { + "changed": False, + "instances": [camel_dict_to_snake_dict(instance, ignore_list=["Tags"]) for instance in results], + } def main(): argument_spec = dict( - db_instance_identifier=dict(aliases=['id']), - filters=dict(type='dict') + db_instance_identifier=dict(aliases=["id"]), + filters=dict(type="dict"), ) module = AnsibleAWSModule( @@ -415,10 +422,16 @@ def main(): supports_check_mode=True, ) - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - module.exit_json(**instance_info(module, conn)) + instance_name = module.params.get("db_instance_identifier") + filters = module.params.get("filters") + + try: + module.exit_json(**instance_info(conn, instance_name, filters)) + except RdsInstanceInfoFailure as e: + module.fail_json_aws(e.original_e, e.user_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py index 0f779d8db..ae1d5d7b1 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2014 Ansible 
Project # Copyright (c) 2017, 2018, 2019 Will Thames # Copyright (c) 2017, 2018 Michael De La Rue # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_instance_snapshot version_added: 5.0.0 @@ -78,13 +76,13 @@ author: - "Alina Buzachis (@alinabuzachis)" - "Joseph Torcasso (@jatorcasso)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create snapshot amazon.aws.rds_instance_snapshot: db_instance_identifier: new-database @@ -103,9 +101,9 @@ EXAMPLES = r''' amazon.aws.rds_instance_snapshot: db_snapshot_identifier: new-database-snapshot state: absent -''' +""" -RETURN = r''' +RETURN = r""" allocated_storage: description: How much storage is allocated in GB. returned: always @@ -228,46 +226,51 @@ vpc_id: returned: always type: str sample: vpc-09ff232e222710ae0 -''' +""" try: import botocore except ImportError: pass # protected by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + # import module snippets -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list def get_snapshot(snapshot_id): try: - snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0] - snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn']) + snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)["DBSnapshots"][0] + snapshot["Tags"] = get_tags(client, module, snapshot["DBSnapshotArn"]) except is_boto3_error_code("DBSnapshotNotFound"): return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + 
module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot def get_parameters(parameters, method_name): - if method_name == 'copy_db_snapshot': - parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] + if method_name == "copy_db_snapshot": + parameters["TargetDBSnapshotIdentifier"] = module.params["db_snapshot_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + method_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {method_description} requires the parameters: {*required_options, }") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) @@ -289,8 +292,8 @@ def ensure_snapshot_absent(): def ensure_snapshot_present(params): - source_id = module.params.get('source_db_snapshot_identifier') - snapshot_name = module.params.get('db_snapshot_identifier') + source_id = module.params.get("source_db_snapshot_identifier") + snapshot_name = module.params.get("db_snapshot_identifier") changed = False snapshot = get_snapshot(snapshot_name) @@ -307,28 +310,28 @@ def ensure_snapshot_present(params): changed |= modify_snapshot() snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"])) def create_snapshot(params): - method_params = get_parameters(params, 'create_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params) + method_params = get_parameters(params, "create_db_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _snapshot, changed = call_method(client, module, "create_db_snapshot", method_params) return changed def copy_snapshot(params): changed = False - snapshot_id = module.params.get('db_snapshot_identifier') + snapshot_id = module.params.get("db_snapshot_identifier") snapshot = get_snapshot(snapshot_id) if not snapshot: - method_params = get_parameters(params, 'copy_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_snapshot', method_params) + method_params = get_parameters(params, "copy_db_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _result, changed = call_method(client, module, "copy_db_snapshot", method_params) return changed @@ -336,11 +339,18 @@ def copy_snapshot(params): def modify_snapshot(): # TODO - add other modifications aside from purely tags changed = False - snapshot_id = module.params.get('db_snapshot_identifier') + snapshot_id = module.params.get("db_snapshot_identifier") snapshot = get_snapshot(snapshot_id) - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + if 
module.params.get("tags"): + changed |= ensure_tags( + client, + module, + snapshot["DBSnapshotArn"], + snapshot["Tags"], + module.params["tags"], + module.params["purge_tags"], + ) return changed @@ -350,37 +360,37 @@ def main(): global module argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), - db_instance_identifier=dict(aliases=['instance_id']), - source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), + state=dict(choices=["present", "absent"], default="present"), + db_snapshot_identifier=dict(aliases=["id", "snapshot_id"], required=True), + db_instance_identifier=dict(aliases=["instance_id"]), + source_db_snapshot_identifier=dict(aliases=["source_id", "source_snapshot_id"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_tags=dict(type="bool", default=False), + source_region=dict(type="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to connect to AWS.") state = module.params.get("state") - if state == 'absent': + if state == "absent": ensure_snapshot_absent() - elif state == 'present': + elif state == "present": params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) ensure_snapshot_present(params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py index 846581b85..01fbde9af 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: rds_option_group short_description: Manages the creation, modification, deletion of RDS option groups version_added: 5.0.0 @@ -124,13 +122,13 @@ options: type: bool default: True extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create an RDS Mysql Option group - name: Create an RDS Mysql option group amazon.aws.rds_option_group: @@ -141,15 +139,15 @@ EXAMPLES = r''' option_group_description: test mysql option group apply_immediately: true options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - "sg-d188c123" - option_settings: - 
- name: MAX_SIMULTANEOUS_CONNECTIONS - value: "20" - - name: CHUNK_SIZE_GROWTH_FACTOR - value: "1.25" + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "sg-d188c123" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" register: new_rds_mysql_option_group # Remove currently configured options for an option group by removing options argument @@ -172,8 +170,8 @@ EXAMPLES = r''' option_group_description: test mysql option group apply_immediately: true tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: rds_mysql_option_group # Delete an RDS Mysql Option group @@ -182,9 +180,9 @@ EXAMPLES = r''' state: absent option_group_name: test-mysql-option-group register: deleted_rds_mysql_option_group -''' +""" -RETURN = r''' +RETURN = r""" allows_vpc_and_non_vpc_instance_memberships: description: Indicates whether this option group can be applied to both VPC and non-VPC instances. returned: always @@ -345,20 +343,19 @@ tags: sample: { "Ansible": "Test" } -''' - +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags try: import botocore @@ -369,15 +366,15 @@ except ImportError: @AWSRetry.jittered_backoff(retries=10) def _describe_option_groups(client, **params): try: - paginator = client.get_paginator('describe_option_groups') - return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0] - except is_boto3_error_code('OptionGroupNotFoundFault'): + paginator = client.get_paginator("describe_option_groups") + return paginator.paginate(**params).build_full_result()["OptionGroupsList"][0] + except is_boto3_error_code("OptionGroupNotFoundFault"): return {} def get_option_group(client, module): params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') + params["OptionGroupName"] = module.params.get("option_group_name") try: result = camel_dict_to_snake_dict(_describe_option_groups(client, **params)) @@ -385,7 +382,7 @@ def get_option_group(client, module): module.fail_json_aws(e, msg="Couldn't describe option groups.") if result: - result['tags'] = get_tags(client, module, result['option_group_arn']) + result["tags"] = get_tags(client, module, result["option_group_arn"]) 
return result @@ -393,12 +390,12 @@ def get_option_group(client, module): def create_option_group_options(client, module): changed = True params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - options_to_include = module.params.get('options') - params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True) + params["OptionGroupName"] = module.params.get("option_group_name") + options_to_include = module.params.get("options") + params["OptionsToInclude"] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True) - if module.params.get('apply_immediately'): - params['ApplyImmediately'] = module.params.get('apply_immediately') + if module.params.get("apply_immediately"): + params["ApplyImmediately"] = module.params.get("apply_immediately") if module.check_mode: return changed @@ -414,11 +411,11 @@ def create_option_group_options(client, module): def remove_option_group_options(client, module, options_to_remove): changed = True params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - params['OptionsToRemove'] = options_to_remove + params["OptionGroupName"] = module.params.get("option_group_name") + params["OptionsToRemove"] = options_to_remove - if module.params.get('apply_immediately'): - params['ApplyImmediately'] = module.params.get('apply_immediately') + if module.params.get("apply_immediately"): + params["ApplyImmediately"] = module.params.get("apply_immediately") if module.check_mode: return changed @@ -434,63 +431,59 @@ def remove_option_group_options(client, module, options_to_remove): def create_option_group(client, module): changed = True params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') - params['EngineName'] = module.params.get('engine_name') - params['MajorEngineVersion'] = str(module.params.get('major_engine_version')) - params['OptionGroupDescription'] = module.params.get('option_group_description') + params["OptionGroupName"] = module.params.get("option_group_name") + params["EngineName"] = module.params.get("engine_name") + params["MajorEngineVersion"] = str(module.params.get("major_engine_version")) + params["OptionGroupDescription"] = module.params.get("option_group_description") - if module.params.get('tags'): - params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + if module.params.get("tags"): + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) else: - params['Tags'] = list() + params["Tags"] = list() if module.check_mode: return changed try: client.create_option_group(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Unable to create Option Group.') + module.fail_json_aws(e, msg="Unable to create Option Group.") return changed def match_option_group_options(client, module): requires_update = False - new_options = module.params.get('options') + new_options = module.params.get("options") # Get existing option groups and compare to our new options spec current_option = get_option_group(client, module) - if current_option['options'] == [] and new_options: + if current_option["options"] == [] and new_options: requires_update = True else: - for option in current_option['options']: + for option in current_option["options"]: for setting_name in new_options: - if setting_name['option_name'] == option['option_name']: - + if setting_name["option_name"] == option["option_name"]: # Security groups need to be 
handled separately due to different keys on request and what is # returned by the API if any( - name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and - setting_name[name] != option[name] + name in option.keys() - ["option_settings", "vpc_security_group_memberships"] + and setting_name[name] != option[name] for name in setting_name ): requires_update = True - if any( - name in option and name == 'vpc_security_group_memberships' - for name in setting_name - ): - current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships']) - new_sg = set(setting_name['vpc_security_group_memberships']) + if any(name in option and name == "vpc_security_group_memberships" for name in setting_name): + current_sg = set(sg["vpc_security_group_id"] for sg in option["vpc_security_group_memberships"]) + new_sg = set(setting_name["vpc_security_group_memberships"]) if current_sg != new_sg: requires_update = True if any( - new_option_setting['name'] == current_option_setting['name'] and - new_option_setting['value'] != current_option_setting['value'] - for new_option_setting in setting_name['option_settings'] - for current_option_setting in option['option_settings'] + new_option_setting["name"] == current_option_setting["name"] + and new_option_setting["value"] != current_option_setting["value"] + for new_option_setting in setting_name["option_settings"] + for current_option_setting in option["option_settings"] ): requires_update = True else: @@ -503,9 +496,9 @@ def compare_option_group(client, module): to_be_added = None to_be_removed = None current_option = get_option_group(client, module) - new_options = module.params.get('options') - new_settings = set([item['option_name'] for item in new_options]) - old_settings = set([item['option_name'] for item in current_option['options']]) + new_options = module.params.get("options") + new_settings = set([item["option_name"] for item in new_options]) + old_settings = set([item["option_name"] for item in current_option["options"]]) if new_settings != old_settings: to_be_added = list(new_settings - old_settings) @@ -529,7 +522,7 @@ def setup_option_group(client, module): # Check tagging changed |= update_tags(client, module, existing_option_group) - if module.params.get('options'): + if module.params.get("options"): # Check if existing options require updating update_required = match_option_group_options(client, module) @@ -550,12 +543,12 @@ def setup_option_group(client, module): # No options were supplied. 
If options exist, remove them current_option_group = get_option_group(client, module) - if current_option_group['options'] != []: + if current_option_group["options"] != []: # Here we would call our remove options function options_to_remove = [] - for option in current_option_group['options']: - options_to_remove.append(option['option_name']) + for option in current_option_group["options"]: + options_to_remove.append(option["option_name"]) changed |= remove_option_group_options(client, module, options_to_remove) @@ -565,7 +558,7 @@ def setup_option_group(client, module): else: changed = create_option_group(client, module) - if module.params.get('options'): + if module.params.get("options"): changed = create_option_group_options(client, module) results = get_option_group(client, module) @@ -576,13 +569,12 @@ def setup_option_group(client, module): def remove_option_group(client, module): changed = False params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') + params["OptionGroupName"] = module.params.get("option_group_name") # Check if there is an existing options group existing_option_group = get_option_group(client, module) if existing_option_group: - if module.check_mode: return True, {} @@ -596,32 +588,39 @@ def remove_option_group(client, module): def update_tags(client, module, option_group): - if module.params.get('tags') is None: + if module.params.get("tags") is None: return False try: - existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList'] + existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group["option_group_arn"])[ + "TagList" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain option group tags.") - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), - module.params['tags'], module.params['purge_tags']) + to_update, to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(existing_tags), module.params["tags"], module.params["purge_tags"] + ) changed = bool(to_update or to_delete) if to_update: try: if module.check_mode: return changed - client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], - Tags=ansible_dict_to_boto3_tag_list(to_update)) + client.add_tags_to_resource( + aws_retry=True, + ResourceName=option_group["option_group_arn"], + Tags=ansible_dict_to_boto3_tag_list(to_update), + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to option group.") if to_delete: try: if module.check_mode: return changed - client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], - TagKeys=to_delete) + client.remove_tags_from_resource( + aws_retry=True, ResourceName=option_group["option_group_arn"], TagKeys=to_delete + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove tags from option group.") @@ -630,32 +629,32 @@ def update_tags(client, module, option_group): def main(): argument_spec = dict( - option_group_name=dict(required=True, type='str'), - engine_name=dict(type='str'), - major_engine_version=dict(type='str'), - option_group_description=dict(type='str'), - options=dict(required=False, type='list', elements='dict'), - apply_immediately=dict(type='bool', default=False), - state=dict(required=True, 
choices=['present', 'absent']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), + option_group_name=dict(required=True, type="str"), + engine_name=dict(type="str"), + major_engine_version=dict(type="str"), + option_group_description=dict(type="str"), + options=dict(required=False, type="list", elements="dict"), + apply_immediately=dict(type="bool", default=False), + state=dict(required=True, choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]], + required_if=[["state", "present", ["engine_name", "major_engine_version", "option_group_description"]]], ) try: - client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": changed, results = setup_option_group(client, module) else: changed, results = remove_option_group(client, module) @@ -663,5 +662,5 @@ def main(): module.exit_json(changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py index 532ef5c12..ef836ce56 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_option_group_info short_description: rds_option_group_info module @@ -48,12 +46,12 @@ options: default: '' required: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: List an option group @@ -66,9 +64,9 @@ EXAMPLES = r''' region: ap-southeast-2 profile: production register: option_group -''' +""" -RETURN = r''' +RETURN = r""" changed: description: True if listing the RDS option group succeeds. 
type: bool @@ -235,57 +233,57 @@ option_groups_list: "Ansible": "Test" } -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff(retries=10) def _describe_option_groups(client, **params): try: - paginator = client.get_paginator('describe_option_groups') + paginator = client.get_paginator("describe_option_groups") return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('OptionGroupNotFoundFault'): + except is_boto3_error_code("OptionGroupNotFoundFault"): return {} def list_option_groups(client, module): option_groups = list() params = dict() - params['OptionGroupName'] = module.params.get('option_group_name') + params["OptionGroupName"] = module.params.get("option_group_name") - if module.params.get('marker'): - params['Marker'] = module.params.get('marker') - if int(params['Marker']) < 20 or int(params['Marker']) > 100: + if module.params.get("marker"): + params["Marker"] = module.params.get("marker") + if int(params["Marker"]) < 20 or int(params["Marker"]) > 100: module.fail_json(msg="marker must be between 20 and 100 minutes") - if module.params.get('max_records'): - params['MaxRecords'] = module.params.get('max_records') - if params['MaxRecords'] > 100: + if module.params.get("max_records"): + params["MaxRecords"] = module.params.get("max_records") + if params["MaxRecords"] > 100: module.fail_json(msg="The maximum number of records to include in the response is 100.") - params['EngineName'] = module.params.get('engine_name') - params['MajorEngineVersion'] = module.params.get('major_engine_version') + params["EngineName"] = module.params.get("engine_name") + params["MajorEngineVersion"] = module.params.get("major_engine_version") try: result = _describe_option_groups(client, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't describe option groups.") - for option_group in result['OptionGroupsList']: + for option_group in result["OptionGroupsList"]: # Turn the boto3 result into ansible_friendly_snaked_names converted_option_group = camel_dict_to_snake_dict(option_group) - converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn']) + converted_option_group["tags"] = get_tags(client, module, converted_option_group["option_group_arn"]) option_groups.append(converted_option_group) return option_groups @@ -293,35 +291,35 @@ def list_option_groups(client, module): def main(): argument_spec = dict( - option_group_name=dict(default='', type='str'), - marker=dict(type='str'), - max_records=dict(type='int', default=100), - engine_name=dict(type='str', default=''), - major_engine_version=dict(type='str', default=''), + 
option_group_name=dict(default="", type="str"), + marker=dict(type="str"), + max_records=dict(type="int", default=100), + engine_name=dict(type="str", default=""), + major_engine_version=dict(type="str", default=""), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['option_group_name', 'engine_name'], - ['option_group_name', 'major_engine_version'], + ["option_group_name", "engine_name"], + ["option_group_name", "major_engine_version"], ], required_together=[ - ['engine_name', 'major_engine_version'], + ["engine_name", "major_engine_version"], ], ) # Validate Requirements try: - connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") results = list_option_groups(connection, module) module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py index 0bb42e0af..abdb57c9b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: rds_param_group version_added: 5.0.0 @@ -35,6 +33,8 @@ options: - The type of database for this group. - Please use following command to get list of all supported db engines and their respective versions. - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"' + - The DB parameter group family is immutable and can't be changed when updating a DB parameter group. + See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html) - Required for I(state=present). 
type: str immediate: @@ -53,14 +53,13 @@ author: - "Scott Anderson (@tastychutney)" - "Will Thames (@willthames)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 amazon.aws.rds_param_group: state: present @@ -77,9 +76,9 @@ EXAMPLES = ''' amazon.aws.rds_param_group: state: absent name: norwegian-blue -''' +""" -RETURN = ''' +RETURN = r""" db_parameter_group_name: description: Name of DB parameter group type: str @@ -104,38 +103,40 @@ tags: description: dictionary of tags type: dict returned: when state is present -''' +""" + +from itertools import zip_longest try: import botocore except ImportError: pass # Handled by AnsibleAWSModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE from ansible.module_utils.six import string_types -from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags INT_MODIFIERS = { - 'K': 1024, - 'M': pow(1024, 2), - 'G': pow(1024, 3), - 'T': pow(1024, 4), + "K": 1024, + "M": pow(1024, 2), + "G": pow(1024, 3), + "T": pow(1024, 4), } @AWSRetry.jittered_backoff() def _describe_db_parameters(connection, **params): try: - paginator = connection.get_paginator('describe_db_parameters') + paginator = connection.get_paginator("describe_db_parameters") return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('DBParameterGroupNotFound'): + except is_boto3_error_code("DBParameterGroupNotFound"): return None @@ -145,7 +146,7 @@ def convert_parameter(param, value): """ converted_value = value - if param['DataType'] == 'integer': + if param["DataType"] == "integer": if isinstance(value, string_types): try: for modifier in INT_MODIFIERS.keys(): @@ -158,7 +159,7 @@ def convert_parameter(param, value): elif isinstance(value, bool): converted_value = 1 if value else 0 - elif param['DataType'] == 'boolean': + elif param["DataType"] == "boolean": if isinstance(value, string_types): converted_value = value in BOOLEANS_TRUE # convert True/False to 1/0 @@ -167,42 +168,43 @@ def convert_parameter(param, value): def update_parameters(module, connection): - groupname = module.params['name'] 
- desired = module.params['params'] - apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot' + groupname = module.params["name"] + desired = module.params["params"] + apply_method = "immediate" if module.params["immediate"] else "pending-reboot" errors = [] modify_list = [] existing = {} try: _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname) if _existing: - existing = _existing['Parameters'] + existing = _existing["Parameters"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe existing parameter groups") - lookup = dict((param['ParameterName'], param) for param in existing) + lookup = dict((param["ParameterName"], param) for param in existing) for param_key, param_value in desired.items(): if param_key not in lookup: - errors.append("Parameter %s is not an available parameter for the %s engine" % - (param_key, module.params.get('engine'))) + errors.append( + f"Parameter {param_key} is not an available parameter for the {module.params.get('engine')} engine" + ) else: converted_value = convert_parameter(lookup[param_key], param_value) # engine-default parameters do not have a ParameterValue, so we'll always override those. - if converted_value != lookup[param_key].get('ParameterValue'): - if lookup[param_key]['IsModifiable']: - modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method)) + if converted_value != lookup[param_key].get("ParameterValue"): + if lookup[param_key]["IsModifiable"]: + modify_list.append( + dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method) + ) else: - errors.append("Parameter %s is not modifiable" % param_key) + errors.append(f"Parameter {param_key} is not modifiable") # modify_db_parameters takes at most 20 parameters if modify_list and not module.check_mode: - try: - from itertools import izip_longest as zip_longest # python 2 - except ImportError: - from itertools import zip_longest # python 3 for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None): non_empty_slice = [item for item in modify_slice if item] try: - connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice) + connection.modify_db_parameter_group( + aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't update parameters") return True, errors @@ -214,9 +216,12 @@ def update_tags(module, connection, group, tags): return False changed = False - existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList'] - to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), - tags, module.params['purge_tags']) + existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group["DBParameterGroupArn"])[ + "TagList" + ] + to_update, to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(existing_tags), tags, module.params["purge_tags"] + ) if module.check_mode: if not to_update and not to_delete: @@ -226,15 +231,19 @@ def update_tags(module, connection, group, tags): if to_update: try: - connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], - Tags=ansible_dict_to_boto3_tag_list(to_update)) + connection.add_tags_to_resource( + aws_retry=True, 
+ ResourceName=group["DBParameterGroupArn"], + Tags=ansible_dict_to_boto3_tag_list(to_update), + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't add tags to parameter group") if to_delete: try: - connection.remove_tags_from_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], - TagKeys=to_delete) + connection.remove_tags_from_resource( + aws_retry=True, ResourceName=group["DBParameterGroupArn"], TagKeys=to_delete + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't remove tags from parameter group") @@ -242,22 +251,24 @@ def update_tags(module, connection, group, tags): def ensure_present(module, connection): - groupname = module.params['name'] - tags = module.params.get('tags') + groupname = module.params["name"] + tags = module.params.get("tags") changed = False errors = [] try: response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) - except is_boto3_error_code('DBParameterGroupNotFound'): + except is_boto3_error_code("DBParameterGroupNotFound"): response = None except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't access parameter group information") if not response: - params = dict(DBParameterGroupName=groupname, - DBParameterGroupFamily=module.params['engine'], - Description=module.params['description']) + params = dict( + DBParameterGroupName=groupname, + DBParameterGroupFamily=module.params["engine"], + Description=module.params["description"], + ) if tags: - params['Tags'] = ansible_dict_to_boto3_tag_list(tags) + params["Tags"] = ansible_dict_to_boto3_tag_list(tags) if not module.check_mode: try: response = connection.create_db_parameter_group(aws_retry=True, **params) @@ -265,35 +276,45 @@ def ensure_present(module, connection): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't create parameter group") else: - group = response['DBParameterGroups'][0] + group = response["DBParameterGroups"][0] + db_parameter_group_family = group["DBParameterGroupFamily"] + + if module.params.get("engine") != db_parameter_group_family: + module.warn("The DB parameter group family (engine) can't be changed when updating a DB parameter group.") + if tags: changed = update_tags(module, connection, group, tags) - if module.params.get('params'): + if module.params.get("params"): params_changed, errors = update_parameters(module, connection) changed = changed or params_changed try: response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) - group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) - except is_boto3_error_code('DBParameterGroupNotFound'): + group = camel_dict_to_snake_dict(response["DBParameterGroups"][0]) + except is_boto3_error_code("DBParameterGroupNotFound"): module.exit_json(changed=True, errors=errors) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't obtain parameter group information") try: - tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList'] + tags = 
connection.list_tags_for_resource(aws_retry=True, ResourceName=group["db_parameter_group_arn"])[ + "TagList" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain parameter group tags") - group['tags'] = boto3_tag_list_to_ansible_dict(tags) + group["tags"] = boto3_tag_list_to_ansible_dict(tags) module.exit_json(changed=changed, errors=errors, **group) def ensure_absent(module, connection): - group = module.params['name'] + group = module.params["name"] try: response = connection.describe_db_parameter_groups(DBParameterGroupName=group) - except is_boto3_error_code('DBParameterGroupNotFound'): + except is_boto3_error_code("DBParameterGroupNotFound"): module.exit_json(changed=False) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't access parameter group information") @@ -310,32 +331,32 @@ def ensure_absent(module, connection): def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), engine=dict(), description=dict(), - params=dict(aliases=['parameters'], type='dict'), - immediate=dict(type='bool', aliases=['apply_immediately']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + params=dict(aliases=["parameters"], type="dict"), + immediate=dict(type="bool", aliases=["apply_immediately"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, - required_if=[['state', 'present', ['description', 'engine']]], - supports_check_mode=True + required_if=[["state", "present", ["description", "engine"]]], + supports_check_mode=True, ) try: - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') - if state == 'present': + state = module.params.get("state") + if state == "present": ensure_present(module, conn) - if state == 'absent': + if state == "absent": ensure_absent(module, conn) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py index a9c69ce95..9617c5ad8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2014-2017 Ansible Project # Copyright (c) 2017, 2018 Will Thames # Copyright (c) 2017, 2018 Michael De La Rue # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: rds_snapshot_info version_added: 5.0.0 @@ -54,13 +52,12 @@ options: author: - "Will Thames (@willthames)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = 
''' +EXAMPLES = r""" - name: Get information about an snapshot amazon.aws.rds_snapshot_info: db_snapshot_identifier: snapshot_name @@ -69,9 +66,9 @@ EXAMPLES = ''' - name: Get all RDS snapshots for an RDS instance amazon.aws.rds_snapshot_info: db_instance_identifier: helloworld-rds-master -''' +""" -RETURN = ''' +RETURN = r""" snapshots: description: List of non-clustered snapshots returned: When cluster parameters are not passed @@ -289,10 +286,14 @@ cluster_snapshots: returned: always type: str sample: vpc-abcd1234 -''' +""" + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict try: import botocore @@ -303,87 +304,99 @@ except ImportError: def common_snapshot_info(module, conn, method, prefix, params): paginator = conn.get_paginator(method) try: - results = paginator.paginate(**params).build_full_result()['%ss' % prefix] - except is_boto3_error_code('%sNotFound' % prefix): + results = paginator.paginate(**params).build_full_result()[f"{prefix}s"] + except is_boto3_error_code(f"{prefix}NotFound"): results = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "trying to get snapshot information") for snapshot in results: try: - if snapshot['SnapshotType'] != 'shared': - snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], - aws_retry=True)['TagList']) + if snapshot["SnapshotType"] != "shared": + snapshot["Tags"] = boto3_tag_list_to_ansible_dict( + conn.list_tags_for_resource(ResourceName=snapshot[f"{prefix}Arn"], aws_retry=True)["TagList"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix]) + snapshot_name = snapshot[f"{prefix}Identifier"] + module.fail_json_aws(e, f"Couldn't get tags for snapshot {snapshot_name}") - return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results] + return [camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"]) for snapshot in results] def cluster_snapshot_info(module, conn): - snapshot_name = module.params.get('db_cluster_snapshot_identifier') - snapshot_type = module.params.get('snapshot_type') - instance_name = module.params.get('db_cluster_identifier') + snapshot_name = module.params.get("db_cluster_snapshot_identifier") + snapshot_type = module.params.get("snapshot_type") + instance_name = module.params.get("db_cluster_identifier") params = dict() if snapshot_name: - params['DBClusterSnapshotIdentifier'] = snapshot_name + params["DBClusterSnapshotIdentifier"] = snapshot_name if instance_name: - params['DBClusterIdentifier'] = instance_name + params["DBClusterIdentifier"] = 
instance_name if snapshot_type: - params['SnapshotType'] = snapshot_type - if snapshot_type == 'public': - params['IncludePublic'] = True - elif snapshot_type == 'shared': - params['IncludeShared'] = True + params["SnapshotType"] = snapshot_type + if snapshot_type == "public": + params["IncludePublic"] = True + elif snapshot_type == "shared": + params["IncludeShared"] = True - return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params) + return common_snapshot_info(module, conn, "describe_db_cluster_snapshots", "DBClusterSnapshot", params) def standalone_snapshot_info(module, conn): - snapshot_name = module.params.get('db_snapshot_identifier') - snapshot_type = module.params.get('snapshot_type') - instance_name = module.params.get('db_instance_identifier') + snapshot_name = module.params.get("db_snapshot_identifier") + snapshot_type = module.params.get("snapshot_type") + instance_name = module.params.get("db_instance_identifier") params = dict() if snapshot_name: - params['DBSnapshotIdentifier'] = snapshot_name + params["DBSnapshotIdentifier"] = snapshot_name if instance_name: - params['DBInstanceIdentifier'] = instance_name + params["DBInstanceIdentifier"] = instance_name if snapshot_type: - params['SnapshotType'] = snapshot_type - if snapshot_type == 'public': - params['IncludePublic'] = True - elif snapshot_type == 'shared': - params['IncludeShared'] = True + params["SnapshotType"] = snapshot_type + if snapshot_type == "public": + params["IncludePublic"] = True + elif snapshot_type == "shared": + params["IncludeShared"] = True - return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params) + return common_snapshot_info(module, conn, "describe_db_snapshots", "DBSnapshot", params) def main(): argument_spec = dict( - db_snapshot_identifier=dict(aliases=['snapshot_name']), + db_snapshot_identifier=dict(aliases=["snapshot_name"]), db_instance_identifier=dict(), db_cluster_identifier=dict(), db_cluster_snapshot_identifier=dict(), - snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']) + snapshot_type=dict(choices=["automated", "manual", "shared", "public"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] + mutually_exclusive=[ + [ + "db_snapshot_identifier", + "db_instance_identifier", + "db_cluster_identifier", + "db_cluster_snapshot_identifier", + ] + ], ) - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) results = dict() - if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']: - results['snapshots'] = standalone_snapshot_info(module, conn) - if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']: - results['cluster_snapshots'] = cluster_snapshot_info(module, conn) + if not module.params["db_cluster_identifier"] and not module.params["db_cluster_snapshot_identifier"]: + results["snapshots"] = standalone_snapshot_info(module, conn) + if not module.params["db_snapshot_identifier"] and not module.params["db_instance_identifier"]: + results["cluster_snapshots"] = cluster_snapshot_info(module, conn) module.exit_json(changed=False, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py index 4aae74acd..17fbdb001 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py @@ -4,12 +4,7 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_subnet_group version_added: 5.0.0 @@ -46,14 +41,13 @@ author: - "Scott Anderson (@tastychutney)" - "Alina Buzachis (@alinabuzachis)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or change a subnet group amazon.aws.rds_subnet_group: state: present @@ -79,9 +73,9 @@ EXAMPLES = r''' amazon.aws.rds_subnet_group: state: absent name: norwegian-blue -''' +""" -RETURN = r''' +RETURN = r""" changed: description: True if listing the RDS subnet group succeeds. type: bool @@ -181,16 +175,16 @@ subnet_group: sample: tag1: Tag1 tag2: Tag2 -''' +""" from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list try: import botocore @@ -200,89 +194,79 @@ except ImportError: def create_result(changed, subnet_group=None): if subnet_group is None: - return dict( - changed=changed - ) + return dict(changed=changed) result_subnet_group = dict(subnet_group) - result_subnet_group['name'] = result_subnet_group.get( - 'db_subnet_group_name') - result_subnet_group['description'] = result_subnet_group.get( - 'db_subnet_group_description') - result_subnet_group['status'] = result_subnet_group.get( - 'subnet_group_status') - result_subnet_group['subnet_ids'] = create_subnet_list( - subnet_group.get('subnets')) - return dict( - changed=changed, - subnet_group=result_subnet_group - ) + result_subnet_group["name"] = result_subnet_group.get("db_subnet_group_name") + result_subnet_group["description"] = result_subnet_group.get("db_subnet_group_description") + result_subnet_group["status"] = result_subnet_group.get("subnet_group_status") + result_subnet_group["subnet_ids"] = create_subnet_list(subnet_group.get("subnets")) + return dict(changed=changed, subnet_group=result_subnet_group) 
@AWSRetry.jittered_backoff() def _describe_db_subnet_groups_with_backoff(client, **kwargs): - paginator = client.get_paginator('describe_db_subnet_groups') + paginator = client.get_paginator("describe_db_subnet_groups") return paginator.paginate(**kwargs).build_full_result() def get_subnet_group(client, module): params = dict() - params['DBSubnetGroupName'] = module.params.get('name').lower() + params["DBSubnetGroupName"] = module.params.get("name").lower() try: _result = _describe_db_subnet_groups_with_backoff(client, **params) - except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + except is_boto3_error_code("DBSubnetGroupNotFoundFault"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't describe subnet groups.") if _result: - result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0]) - result['tags'] = get_tags(client, module, result['db_subnet_group_arn']) + result = camel_dict_to_snake_dict(_result["DBSubnetGroups"][0]) + result["tags"] = get_tags(client, module, result["db_subnet_group_arn"]) return result def create_subnet_list(subnets): - r''' + r""" Construct a list of subnet ids from a list of subnets dicts returned by boto3. Parameters: subnets (list): A list of subnets definitions. @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups Returns: (list): List of subnet ids (str) - ''' + """ subnets_ids = [] for subnet in subnets: - subnets_ids.append(subnet.get('subnet_identifier')) + subnets_ids.append(subnet.get("subnet_identifier")) return subnets_ids def main(): argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), description=dict(required=False), - subnets=dict(required=False, type='list', elements='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + subnets=dict(required=False, type="list", elements="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) - required_if = [('state', 'present', ['description', 'subnets'])] + required_if = [("state", "present", ["description", "subnets"])] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or [] + state = module.params.get("state") + group_name = module.params.get("name").lower() + group_description = module.params.get("description") + group_subnets = module.params.get("subnets") or [] try: - connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to instantiate AWS connection.') + module.fail_json_aws(e, "Failed to instantiate AWS connection.") # Default. 
changed = None @@ -297,7 +281,7 @@ def main(): matching_groups = get_subnet_group(connection, module) - if state == 'present': + if state == "present": if matching_groups: # We have one or more subnets at this point. @@ -305,22 +289,22 @@ def main(): tags_update = ensure_tags( connection, module, - matching_groups['db_subnet_group_arn'], - matching_groups['tags'], + matching_groups["db_subnet_group_arn"], + matching_groups["tags"], module.params.get("tags"), - module.params['purge_tags'] + module.params["purge_tags"], ) # Sort the subnet groups before we compare them - existing_subnets = create_subnet_list(matching_groups['subnets']) + existing_subnets = create_subnet_list(matching_groups["subnets"]) existing_subnets.sort() group_subnets.sort() # See if anything changed. if ( - matching_groups['db_subnet_group_name'] != group_name or - matching_groups['db_subnet_group_description'] != group_description or - existing_subnets != group_subnets + matching_groups["db_subnet_group_name"] != group_name + or matching_groups["db_subnet_group_description"] != group_description + or existing_subnets != group_subnets ): if not module.check_mode: # Modify existing group. @@ -329,10 +313,10 @@ def main(): aws_retry=True, DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, - SubnetIds=group_subnets + SubnetIds=group_subnets, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to update a subnet group.') + module.fail_json_aws(e, "Failed to update a subnet group.") subnet_update = True else: if not module.check_mode: @@ -342,19 +326,22 @@ def main(): DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets, - Tags=_tags + Tags=_tags, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, 'Failed to create a new subnet group.') + module.fail_json_aws(e, "Failed to create a new subnet group.") subnet_update = True - elif state == 'absent': + elif state == "absent": if not module.check_mode: try: connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name) - except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + except is_boto3_error_code("DBSubnetGroupNotFoundFault"): module.exit_json(**result) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 'Failed to delete a subnet group.') + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Failed to delete a subnet group.") else: subnet_group = get_subnet_group(connection, module) if subnet_group: @@ -370,5 +357,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53.py b/ansible_collections/amazon/aws/plugins/modules/route53.py index 3ac321763..8a5ccb5a6 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53.py @@ -4,12 +4,7 @@ # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: route53 version_added: 5.0.0 @@ -164,11 +159,11 @@ author: - Bruce 
Pennypacker (@bpennypacker) - Mike Buzzetti (@jimbydamonk) extends_documentation_fragment: - - amazon.aws.aws + - amazon.aws.common.modules - amazon.aws.boto3 -''' +""" -RETURN = r''' +RETURN = r""" nameservers: description: Nameservers associated with the zone. returned: when state is 'get' @@ -249,9 +244,15 @@ set: returned: always type: str sample: foo.bar.com. -''' - -EXAMPLES = r''' +wait_id: + description: + - The wait ID for the applied change. Can be used to wait for the change to propagate later on when I(wait=false). + type: str + returned: when changed + version_added: 6.3.0 +""" + +EXAMPLES = r""" - name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated amazon.aws.route53: state: present @@ -324,7 +325,7 @@ EXAMPLES = r''' record: elb.foo.com type: A value: "{{ elb_dns_name }}" - alias: True + alias: true alias_hosted_zone_id: "{{ elb_zone_id }}" - name: Retrieve the details for elb.foo.com amazon.aws.route53: @@ -341,7 +342,7 @@ EXAMPLES = r''' ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" - alias: True + alias: true alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}" - name: Add an alias record that points to an Amazon ELB and evaluates it health amazon.aws.route53: @@ -350,9 +351,9 @@ EXAMPLES = r''' record: elb.foo.com type: A value: "{{ elb_dns_name }}" - alias: True + alias: true alias_hosted_zone_id: "{{ elb_zone_id }}" - alias_evaluate_target_health: True + alias_evaluate_target_health: true - name: Add an AAAA record with Hosted Zone ID amazon.aws.route53: state: present @@ -407,7 +408,7 @@ EXAMPLES = r''' geo_location: country_code: US subdivision_code: TX -''' +""" from operator import itemgetter @@ -419,10 +420,10 @@ except ImportError: from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing @@ -431,23 +432,23 @@ WAIT_RETRY = 5 # how many seconds to wait between propagation status polls @AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) def _list_record_sets(route53, **kwargs): - paginator = route53.get_paginator('list_resource_record_sets') - return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets'] + paginator = route53.get_paginator("list_resource_record_sets") + return paginator.paginate(**kwargs).build_full_result()["ResourceRecordSets"] @AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES) def _list_hosted_zones(route53, **kwargs): - paginator = route53.get_paginator('list_hosted_zones') - return paginator.paginate(**kwargs).build_full_result()['HostedZones'] + paginator = route53.get_paginator("list_hosted_zones") + 
return paginator.paginate(**kwargs).build_full_result()["HostedZones"] def get_record(route53, zone_id, record_name, record_type, record_identifier): record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id) for record_set in record_sets_results: - record_set['Name'] = record_set['Name'].encode().decode('unicode_escape') + record_set["Name"] = record_set["Name"].encode().decode("unicode_escape") # If the record name and type is not equal, move to the next record - if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']): + if (record_name.lower(), record_type) != (record_set["Name"].lower(), record_set["Type"]): continue if record_identifier and record_identifier != record_set.get("SetIdentifier"): @@ -465,15 +466,15 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): for zone in hosted_zones_results: # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params - private_zone = module.boolean(zone['Config'].get('PrivateZone', False)) - zone_id = zone['Id'].replace("/hostedzone/", "") + private_zone = module.boolean(zone["Config"].get("PrivateZone", False)) + zone_id = zone["Id"].replace("/hostedzone/", "") - if private_zone == want_private and zone['Name'] == zone_name: + if private_zone == want_private and zone["Name"] == zone_name: if want_vpc_id: # NOTE: These details aren't available in other boto3 methods, hence the necessary # extra API call hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) - if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]: + if want_vpc_id in [v["VPCId"] for v in hosted_zone["VPCs"]]: return zone_id else: return zone_id @@ -489,239 +490,264 @@ def format_record(record_in, zone_in, zone_id): return None record = dict(record_in) - record['zone'] = zone_in - record['hosted_zone_id'] = zone_id - - record['type'] = record_in.get('Type', None) - record['record'] = record_in.get('Name').encode().decode('unicode_escape') - record['ttl'] = record_in.get('TTL', None) - record['identifier'] = record_in.get('SetIdentifier', None) - record['weight'] = record_in.get('Weight', None) - record['region'] = record_in.get('Region', None) - record['failover'] = record_in.get('Failover', None) - record['health_check'] = record_in.get('HealthCheckId', None) - - if record['ttl']: - record['ttl'] = str(record['ttl']) - if record['weight']: - record['weight'] = str(record['weight']) - if record['region']: - record['region'] = str(record['region']) - - if record_in.get('AliasTarget'): - record['alias'] = True - record['value'] = record_in['AliasTarget'].get('DNSName') - record['values'] = [record_in['AliasTarget'].get('DNSName')] - record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId') - record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth') + record["zone"] = zone_in + record["hosted_zone_id"] = zone_id + + record["type"] = record_in.get("Type", None) + record["record"] = record_in.get("Name").encode().decode("unicode_escape") + record["ttl"] = record_in.get("TTL", None) + record["identifier"] = record_in.get("SetIdentifier", None) + record["weight"] = record_in.get("Weight", None) + record["region"] = record_in.get("Region", None) + record["failover"] = record_in.get("Failover", None) + record["health_check"] = record_in.get("HealthCheckId", None) + + if record["ttl"]: + record["ttl"] = str(record["ttl"]) + if record["weight"]: + record["weight"] = 
str(record["weight"]) + if record["region"]: + record["region"] = str(record["region"]) + + if record_in.get("AliasTarget"): + record["alias"] = True + record["value"] = record_in["AliasTarget"].get("DNSName") + record["values"] = [record_in["AliasTarget"].get("DNSName")] + record["alias_hosted_zone_id"] = record_in["AliasTarget"].get("HostedZoneId") + record["alias_evaluate_target_health"] = record_in["AliasTarget"].get("EvaluateTargetHealth") else: - record['alias'] = False - records = [r.get('Value') for r in record_in.get('ResourceRecords')] - record['value'] = ','.join(sorted(records)) - record['values'] = sorted(records) + record["alias"] = False + records = [r.get("Value") for r in record_in.get("ResourceRecords")] + record["value"] = ",".join(sorted(records)) + record["values"] = sorted(records) return record def get_hosted_zone_nameservers(route53, zone_id): - hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name'] + hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)["HostedZone"]["Name"] resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id) nameservers_records = list( - filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', resource_records_sets) - )[0]['ResourceRecords'] + filter(lambda record: record["Name"] == hosted_zone_name and record["Type"] == "NS", resource_records_sets) + )[0]["ResourceRecords"] - return [ns_record['Value'] for ns_record in nameservers_records] + return [ns_record["Value"] for ns_record in nameservers_records] def main(): argument_spec = dict( - state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']), - zone=dict(type='str'), - hosted_zone_id=dict(type='str'), - record=dict(type='str', required=True), - ttl=dict(type='int', default=3600), - type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']), - alias=dict(type='bool'), - alias_hosted_zone_id=dict(type='str'), - alias_evaluate_target_health=dict(type='bool', default=False), - value=dict(type='list', elements='str'), - overwrite=dict(type='bool'), - retry_interval=dict(type='int', default=500), - private_zone=dict(type='bool', default=False), - identifier=dict(type='str'), - weight=dict(type='int'), - region=dict(type='str'), - geo_location=dict(type='dict', - options=dict( - continent_code=dict(type="str"), - country_code=dict(type="str"), - subdivision_code=dict(type="str")), - required=False), - health_check=dict(type='str'), - failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']), - vpc_id=dict(type='str'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), + state=dict( + type="str", required=True, choices=["absent", "create", "delete", "get", "present"], aliases=["command"] + ), + zone=dict(type="str"), + hosted_zone_id=dict(type="str"), + record=dict(type="str", required=True), + ttl=dict(type="int", default=3600), + type=dict( + type="str", + required=True, + choices=["A", "AAAA", "CAA", "CNAME", "MX", "NS", "PTR", "SOA", "SPF", "SRV", "TXT"], + ), + alias=dict(type="bool"), + alias_hosted_zone_id=dict(type="str"), + alias_evaluate_target_health=dict(type="bool", default=False), + value=dict(type="list", elements="str"), + overwrite=dict(type="bool"), + retry_interval=dict(type="int", default=500), + private_zone=dict(type="bool", default=False), + identifier=dict(type="str"), + weight=dict(type="int"), + 
region=dict(type="str"), + geo_location=dict( + type="dict", + options=dict( + continent_code=dict(type="str"), country_code=dict(type="str"), subdivision_code=dict(type="str") + ), + required=False, + ), + health_check=dict(type="str"), + failover=dict(type="str", choices=["PRIMARY", "SECONDARY"]), + vpc_id=dict(type="str"), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[['zone', 'hosted_zone_id']], + required_one_of=[["zone", "hosted_zone_id"]], # If alias is True then you must specify alias_hosted_zone as well - required_together=[['alias', 'alias_hosted_zone_id']], + required_together=[["alias", "alias_hosted_zone_id"]], # state=present, absent, create, delete THEN value is required required_if=( - ('state', 'present', ['value']), - ('state', 'create', ['value']), + ("state", "present", ["value"]), + ("state", "create", ["value"]), ), # failover, region and weight are mutually exclusive mutually_exclusive=[ - ('failover', 'region', 'weight'), - ('alias', 'ttl'), + ("failover", "region", "weight"), + ("alias", "ttl"), ], # failover, region, weight and geo_location require identifier required_by=dict( - failover=('identifier',), - region=('identifier',), - weight=('identifier',), - geo_location=('identifier'), + failover=("identifier",), + region=("identifier",), + weight=("identifier",), + geo_location=("identifier",), ), ) - if module.params['state'] in ('present', 'create'): - command_in = 'create' - elif module.params['state'] in ('absent', 'delete'): - command_in = 'delete' - elif module.params['state'] == 'get': - command_in = 'get' - - zone_in = (module.params.get('zone') or '').lower() - hosted_zone_id_in = module.params.get('hosted_zone_id') - ttl_in = module.params.get('ttl') - record_in = module.params.get('record').lower() - type_in = module.params.get('type') - value_in = module.params.get('value') or [] - alias_in = module.params.get('alias') - alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') - alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health') - retry_interval_in = module.params.get('retry_interval') - - if module.params['vpc_id'] is not None: + if module.params["state"] in ("present", "create"): + command_in = "create" + elif module.params["state"] in ("absent", "delete"): + command_in = "delete" + elif module.params["state"] == "get": + command_in = "get" + + zone_in = (module.params.get("zone") or "").lower() + hosted_zone_id_in = module.params.get("hosted_zone_id") + ttl_in = module.params.get("ttl") + record_in = module.params.get("record").lower() + type_in = module.params.get("type") + value_in = module.params.get("value") or [] + alias_in = module.params.get("alias") + alias_hosted_zone_id_in = module.params.get("alias_hosted_zone_id") + alias_evaluate_target_health_in = module.params.get("alias_evaluate_target_health") + retry_interval_in = module.params.get("retry_interval") + + if module.params["vpc_id"] is not None: private_zone_in = True else: - private_zone_in = module.params.get('private_zone') - - identifier_in = module.params.get('identifier') - weight_in = module.params.get('weight') - region_in = module.params.get('region') - health_check_in = module.params.get('health_check') - failover_in = module.params.get('failover') - vpc_id_in = module.params.get('vpc_id') - wait_in = module.params.get('wait') - wait_timeout_in = module.params.get('wait_timeout') - 
geo_location = module.params.get('geo_location') - - if zone_in[-1:] != '.': + private_zone_in = module.params.get("private_zone") + + identifier_in = module.params.get("identifier") + weight_in = module.params.get("weight") + region_in = module.params.get("region") + health_check_in = module.params.get("health_check") + failover_in = module.params.get("failover") + vpc_id_in = module.params.get("vpc_id") + wait_in = module.params.get("wait") + wait_timeout_in = module.params.get("wait_timeout") + geo_location = module.params.get("geo_location") + + if zone_in[-1:] != ".": zone_in += "." - if record_in[-1:] != '.': + if record_in[-1:] != ".": record_in += "." - if command_in == 'create' or command_in == 'delete': + if command_in == "create" or command_in == "delete": if alias_in and len(value_in) != 1: module.fail_json(msg="parameter 'value' must contain a single dns name for alias records") - if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None: - module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.") + if ( + weight_in is None and region_in is None and failover_in is None and geo_location is None + ) and identifier_in is not None: + module.fail_json( + msg=( + "You have specified identifier which makes sense only if you specify one of: weight, region," + " geo_location or failover." + ) + ) retry_decorator = AWSRetry.jittered_backoff( retries=MAX_AWS_RETRIES, delay=retry_interval_in, - catch_extra_error_codes=['PriorRequestNotComplete'], + catch_extra_error_codes=["PriorRequestNotComplete"], max_delay=max(60, retry_interval_in), ) # connect to the route53 endpoint try: - route53 = module.client('route53', retry_decorator=retry_decorator) + route53 = module.client("route53", retry_decorator=retry_decorator) except botocore.exceptions.HTTPClientError as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Find the named zone ID zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in) # Verify that the requested zone is already defined in Route53 if zone_id is None: - errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in) + errmsg = f"Zone {zone_in or hosted_zone_id_in} does not exist in Route53" module.fail_json(msg=errmsg) aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in) - resource_record_set = scrub_none_parameters({ - 'Name': record_in, - 'Type': type_in, - 'Weight': weight_in, - 'Region': region_in, - 'Failover': failover_in, - 'TTL': ttl_in, - 'ResourceRecords': [dict(Value=value) for value in value_in], - 'HealthCheckId': health_check_in, - 'SetIdentifier': identifier_in, - }) + resource_record_set = scrub_none_parameters( + { + "Name": record_in, + "Type": type_in, + "Weight": weight_in, + "Region": region_in, + "Failover": failover_in, + "TTL": ttl_in, + "ResourceRecords": [dict(Value=value) for value in value_in], + "HealthCheckId": health_check_in, + "SetIdentifier": identifier_in, + } + ) if geo_location: - continent_code = geo_location.get('continent_code') - country_code = geo_location.get('country_code') - subdivision_code = geo_location.get('subdivision_code') + continent_code = geo_location.get("continent_code") + country_code = geo_location.get("country_code") + subdivision_code = geo_location.get("subdivision_code") if continent_code and (country_code 
or subdivision_code): - module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.') + module.fail_json( + changed=False, + msg=( + "While using geo_location, continent_code is mutually exclusive with country_code and" + " subdivision_code." + ), + ) if not any([continent_code, country_code, subdivision_code]): - module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.') + module.fail_json( + changed=False, + msg="To use geo_location please specify either continent_code, country_code, or subdivision_code.", + ) - if geo_location.get('subdivision_code') and geo_location.get('country_code').lower() != 'us': - module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.') + if geo_location.get("subdivision_code") and geo_location.get("country_code").lower() != "us": + module.fail_json(changed=False, msg="To use subdivision_code, you must specify country_code as US.") # Build geo_location suboptions specification - resource_record_set['GeoLocation'] = {} + resource_record_set["GeoLocation"] = {} if continent_code: - resource_record_set['GeoLocation']['ContinentCode'] = continent_code + resource_record_set["GeoLocation"]["ContinentCode"] = continent_code if country_code: - resource_record_set['GeoLocation']['CountryCode'] = country_code + resource_record_set["GeoLocation"]["CountryCode"] = country_code if subdivision_code: - resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code + resource_record_set["GeoLocation"]["SubdivisionCode"] = subdivision_code - if command_in == 'delete' and aws_record is not None: - resource_record_set['TTL'] = aws_record.get('TTL') - if not resource_record_set['ResourceRecords']: - resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords') + if command_in == "delete" and aws_record is not None: + resource_record_set["TTL"] = aws_record.get("TTL") + if not resource_record_set["ResourceRecords"]: + resource_record_set["ResourceRecords"] = aws_record.get("ResourceRecords") if alias_in: - resource_record_set['AliasTarget'] = dict( + resource_record_set["AliasTarget"] = dict( HostedZoneId=alias_hosted_zone_id_in, DNSName=value_in[0], - EvaluateTargetHealth=alias_evaluate_target_health_in + EvaluateTargetHealth=alias_evaluate_target_health_in, ) - if 'ResourceRecords' in resource_record_set: - del resource_record_set['ResourceRecords'] - if 'TTL' in resource_record_set: - del resource_record_set['TTL'] + if "ResourceRecords" in resource_record_set: + del resource_record_set["ResourceRecords"] + if "TTL" in resource_record_set: + del resource_record_set["TTL"] # On CAA records order doesn't matter - if type_in == 'CAA': - resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value')) + if type_in == "CAA": + resource_record_set["ResourceRecords"] = sorted(resource_record_set["ResourceRecords"], key=itemgetter("Value")) if aws_record: - aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value')) + aws_record["ResourceRecords"] = sorted(aws_record["ResourceRecords"], key=itemgetter("Value")) - if command_in == 'create' and aws_record == resource_record_set: + if command_in == "create" and aws_record == resource_record_set: rr_sets = [camel_dict_to_snake_dict(resource_record_set)] module.exit_json(changed=False, resource_records_sets=rr_sets) - if command_in == 'get': - 
if type_in == 'NS': - ns = aws_record.get('values', []) + if command_in == "get": + if type_in == "NS": + ns = aws_record.get("values", []) else: # Retrieve name servers associated to the zone. ns = get_hosted_zone_nameservers(route53, zone_id) @@ -735,49 +761,47 @@ def main(): rr_sets = [camel_dict_to_snake_dict(aws_record)] module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets) - if command_in == 'delete' and not aws_record: + if command_in == "delete" and not aws_record: module.exit_json(changed=False) - if command_in == 'create' or command_in == 'delete': - if command_in == 'create' and aws_record: - if not module.params['overwrite']: + if command_in == "create" or command_in == "delete": + if command_in == "create" and aws_record: + if not module.params["overwrite"]: module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it") - command = 'UPSERT' + command = "UPSERT" else: command = command_in.upper() + wait_id = None if not module.check_mode: try: change_resource_record_sets = route53.change_resource_record_sets( aws_retry=True, HostedZoneId=zone_id, - ChangeBatch=dict( - Changes=[ - dict( - Action=command, - ResourceRecordSet=resource_record_set - ) - ] - ) + ChangeBatch=dict(Changes=[dict(Action=command, ResourceRecordSet=resource_record_set)]), ) + wait_id = change_resource_record_sets["ChangeInfo"]["Id"] if wait_in: - waiter = get_waiter(route53, 'resource_record_sets_changed') + waiter = get_waiter(route53, "resource_record_sets_changed") waiter.wait( - Id=change_resource_record_sets['ChangeInfo']['Id'], + Id=change_resource_record_sets["ChangeInfo"]["Id"], WaiterConfig=dict( Delay=WAIT_RETRY, MaxAttempts=wait_timeout_in // WAIT_RETRY, - ) + ), ) - except is_boto3_error_message('but it already exists'): + except is_boto3_error_message("but it already exists"): module.exit_json(changed=False) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to update records') + module.fail_json_aws(e, msg="Timeout waiting for resource records changes to be applied") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to update records") except Exception as e: - module.fail_json(msg='Unhandled exception. (%s)' % to_native(e)) + module.fail_json(msg=f"Unhandled exception. 
({to_native(e)})") rr_sets = [camel_dict_to_snake_dict(resource_record_set)] formatted_aws = format_record(aws_record, zone_in, zone_id) @@ -785,13 +809,14 @@ def main(): module.exit_json( changed=True, + wait_id=wait_id, diff=dict( before=formatted_aws, - after=formatted_record if command_in != 'delete' else {}, + after=formatted_record if command_in != "delete" else {}, resource_record_sets=rr_sets, ), ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py index 1528be9ae..369c7c774 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: route53_health_check version_added: 5.0.0 @@ -47,8 +45,22 @@ options: - The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy. - Once health_check is created, type can not be changed. - choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ] + - The CALCULATED choice was added in 6.3.0. + choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP', 'CALCULATED' ] type: str + child_health_checks: + description: + - The child health checks used for a calculated health check. + - This parameter takes in the child health checks ids. + type: list + elements: str + version_added: 6.3.0 + health_threshold: + description: + - The minimum number of healthy child health checks for a calculated health check to be considered healthy. + default: 1 + type: int + version_added: 6.3.0 resource_path: description: - The path that you want Amazon Route 53 to request when performing @@ -126,13 +138,13 @@ author: notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a health-check for host1.example.com and use it in record amazon.aws.route53_health_check: state: present @@ -194,10 +206,9 @@ EXAMPLES = ''' amazon.aws.route53_health_check: state: absent id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx +""" -''' - -RETURN = r''' +RETURN = r""" health_check: description: Information about the health check. returned: success @@ -278,7 +289,7 @@ health_check: type: dict returned: When the health check exists. 
sample: '{"my_key": "my_value"}' -''' +""" import uuid @@ -289,9 +300,9 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags @@ -300,7 +311,7 @@ def _list_health_checks(**params): try: results = client.list_health_checks(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to list health checks') + module.fail_json_aws(e, msg="Failed to list health checks") return results @@ -321,19 +332,19 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): # starting from scratch with a paginator results = _list_health_checks() while True: - for check in results.get('HealthChecks'): - config = check.get('HealthCheckConfig') + for check in results.get("HealthChecks"): + config = check.get("HealthCheckConfig") if ( - config.get('IPAddress', None) == ip_addr and - config.get('FullyQualifiedDomainName', None) == fqdn and - config.get('Type') == hc_type and - config.get('RequestInterval') == request_interval and - config.get('Port', None) == port + config.get("IPAddress", None) == ip_addr + and config.get("FullyQualifiedDomainName", None) == fqdn + and config.get("Type") == hc_type + and config.get("RequestInterval") == request_interval + and config.get("Port", None) == port ): return check - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) + if results.get("IsTruncated", False): + results = _list_health_checks(Marker=results.get("NextMarker")) else: return None @@ -342,12 +353,12 @@ def get_existing_checks_with_name(): results = _list_health_checks() health_checks_with_name = {} while True: - for check in results.get('HealthChecks'): - if 'Name' in describe_health_check(check['Id'])['tags']: - check_name = describe_health_check(check['Id'])['tags']['Name'] + for check in results.get("HealthChecks"): + if "Name" in describe_health_check(check["Id"])["tags"]: + check_name = describe_health_check(check["Id"])["tags"]["Name"] health_checks_with_name[check_name] = check - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) + if results.get("IsTruncated", False): + results = _list_health_checks(Marker=results.get("NextMarker")) else: return health_checks_with_name @@ -357,24 +368,28 @@ def delete_health_check(check_id): return False, None if module.check_mode: - return True, 'delete' + return True, "delete" try: client.delete_health_check( aws_retry=True, HealthCheckId=check_id, ) - except is_boto3_error_code('NoSuchHealthCheck'): + except is_boto3_error_code("NoSuchHealthCheck"): # Handle the deletion race condition as cleanly as possible return False, None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: 
disable=duplicate-except - module.fail_json_aws(e, msg='Failed to list health checks') - - return True, 'delete' + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list health checks") + return True, "delete" -def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in): +def create_health_check( + ip_addr_in, fqdn_in, type_in, request_interval_in, port_in, child_health_checks_in, health_threshold_in +): # In general, if a request is repeated with the same CallerRef it won't # result in a duplicate check appearing. This means we can safely use our # retry decorators @@ -383,43 +398,52 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ health_check = dict( Type=type_in, - RequestInterval=request_interval_in, - Port=port_in, ) - if module.params.get('disabled') is not None: - health_check['Disabled'] = module.params.get('disabled') + if module.params.get("disabled") is not None: + health_check["Disabled"] = module.params.get("disabled") if ip_addr_in: - health_check['IPAddress'] = ip_addr_in + health_check["IPAddress"] = ip_addr_in if fqdn_in: - health_check['FullyQualifiedDomainName'] = fqdn_in + health_check["FullyQualifiedDomainName"] = fqdn_in + if port_in: + health_check["Port"] = port_in - if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - resource_path = module.params.get('resource_path') + if type_in in ["HTTP", "HTTPS", "HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: + resource_path = module.params.get("resource_path") # if not resource_path: # missing_args.append('resource_path') if resource_path: - health_check['ResourcePath'] = resource_path - if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - string_match = module.params.get('string_match') + health_check["ResourcePath"] = resource_path + if type_in in ["HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: + string_match = module.params.get("string_match") if not string_match: - missing_args.append('string_match') - health_check['SearchString'] = module.params.get('string_match') - - failure_threshold = module.params.get('failure_threshold') - if not failure_threshold: - failure_threshold = 3 - health_check['FailureThreshold'] = failure_threshold + missing_args.append("string_match") + health_check["SearchString"] = module.params.get("string_match") + + if type_in == "CALCULATED": + if not child_health_checks_in: + missing_args.append("child_health_checks") + if not health_threshold_in: + missing_args.append("health_threshold") + health_check["ChildHealthChecks"] = child_health_checks_in + health_check["HealthThreshold"] = health_threshold_in + else: + failure_threshold = module.params.get("failure_threshold") + if not failure_threshold: + failure_threshold = 3 + health_check["FailureThreshold"] = failure_threshold + health_check["RequestInterval"] = request_interval_in - if module.params.get('measure_latency') is not None: - health_check['MeasureLatency'] = module.params.get('measure_latency') + if module.params.get("measure_latency") is not None: + health_check["MeasureLatency"] = module.params.get("measure_latency") if missing_args: - module.fail_json(msg='missing required arguments for creation: {0}'.format( - ', '.join(missing_args)), + module.fail_json( + msg=f"missing required arguments for creation: {', '.join(missing_args)}", ) if module.check_mode: - return True, 'create', None + return True, "create", None try: result = client.create_health_check( 
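The hunks above make create_health_check() branch on the check type: a CALCULATED check carries only ChildHealthChecks and HealthThreshold, while the endpoint-oriented types keep FailureThreshold and RequestInterval. A rough usage sketch against the new argument_spec follows; the check name and child health check IDs are placeholders, not values taken from this change:

- name: Create a calculated health check aggregating two existing checks
  amazon.aws.route53_health_check:
    state: present
    type: CALCULATED
    health_check_name: my-calculated-check
    use_unique_names: true
    health_threshold: 1
    child_health_checks:
      - 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
      - 87654321-dcba-dcba-dcba-0fyyyyyyyyyy

Per the updated args_if and args_mutually_exclusive lists, type=CALCULATED requires child_health_checks and health_threshold, and child_health_checks cannot be combined with ip_address, port, or fqdn.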
@@ -428,10 +452,10 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ HealthCheckConfig=health_check, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check) + module.fail_json_aws(e, msg="Failed to create health check.", health_check=health_check) - check_id = result.get('HealthCheck').get('Id') - return True, 'create', check_id + check_id = result.get("HealthCheck").get("Id") + return True, "create", check_id def update_health_check(existing_check): @@ -443,49 +467,62 @@ def update_health_check(existing_check): # - IPAddress # - Port # - FullyQualifiedDomainName + # - ChildHealthChecks + # - HealthThreshold changes = dict() - existing_config = existing_check.get('HealthCheckConfig') + existing_config = existing_check.get("HealthCheckConfig") + check_id = existing_check.get("Id") - resource_path = module.params.get('resource_path', None) - if resource_path and resource_path != existing_config.get('ResourcePath'): - changes['ResourcePath'] = resource_path + resource_path = module.params.get("resource_path", None) + if resource_path and resource_path != existing_config.get("ResourcePath"): + changes["ResourcePath"] = resource_path - search_string = module.params.get('string_match', None) - if search_string and search_string != existing_config.get('SearchString'): - changes['SearchString'] = search_string + search_string = module.params.get("string_match", None) + if search_string and search_string != existing_config.get("SearchString"): + changes["SearchString"] = search_string - failure_threshold = module.params.get('failure_threshold', None) - if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'): - changes['FailureThreshold'] = failure_threshold + type_in = module.params.get("type", None) + if type_in != "CALCULATED": + failure_threshold = module.params.get("failure_threshold", None) + if failure_threshold and failure_threshold != existing_config.get("FailureThreshold"): + changes["FailureThreshold"] = failure_threshold - disabled = module.params.get('disabled', None) - if disabled is not None and disabled != existing_config.get('Disabled'): - changes['Disabled'] = module.params.get('disabled') + disabled = module.params.get("disabled", None) + if disabled is not None and disabled != existing_config.get("Disabled"): + changes["Disabled"] = module.params.get("disabled") # If updating based on Health Check ID or health_check_name, we can update - if module.params.get('health_check_id') or module.params.get('use_unique_names'): - ip_address = module.params.get('ip_address', None) - if ip_address is not None and ip_address != existing_config.get('IPAddress'): - changes['IPAddress'] = module.params.get('ip_address') + if module.params.get("health_check_id") or module.params.get("use_unique_names"): + ip_address = module.params.get("ip_address", None) + if ip_address is not None and ip_address != existing_config.get("IPAddress"): + changes["IPAddress"] = module.params.get("ip_address") + + port = module.params.get("port", None) + if port is not None and port != existing_config.get("Port"): + changes["Port"] = module.params.get("port") + + fqdn = module.params.get("fqdn", None) + if fqdn is not None and fqdn != existing_config.get("FullyQualifiedDomainName"): + changes["FullyQualifiedDomainName"] = module.params.get("fqdn") - port = module.params.get('port', None) - if port is not None and port != 
existing_config.get('Port'): - changes['Port'] = module.params.get('port') + if type_in == "CALCULATED": + child_health_checks = module.params.get("child_health_checks", None) + if child_health_checks is not None and child_health_checks != existing_config.get("ChildHealthChecks"): + changes["ChildHealthChecks"] = module.params.get("child_health_checks") - fqdn = module.params.get('fqdn', None) - if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'): - changes['FullyQualifiedDomainName'] = module.params.get('fqdn') + health_threshold = module.params.get("health_threshold", None) + if health_threshold is not None and health_threshold != existing_config.get("HealthThreshold"): + changes["HealthThreshold"] = module.params.get("health_threshold") # No changes... if not changes: - return False, None + return False, None, check_id if module.check_mode: - return True, 'update' + return True, "update", check_id - check_id = existing_check.get('Id') # This makes sure we're starting from the version we think we are... - version_id = existing_check.get('HealthCheckVersion', 1) + version_id = existing_check.get("HealthCheckVersion", 1) try: client.update_health_check( HealthCheckId=check_id, @@ -493,9 +530,9 @@ def update_health_check(existing_check): **changes, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to update health check.', id=check_id) + module.fail_json_aws(e, msg="Failed to update health check.", id=check_id) - return True, 'update' + return True, "update", check_id def describe_health_check(id): @@ -508,49 +545,55 @@ def describe_health_check(id): HealthCheckId=id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get health check.', id=id) + module.fail_json_aws(e, msg="Failed to get health check.", id=id) - health_check = result.get('HealthCheck', {}) + health_check = result.get("HealthCheck", {}) health_check = camel_dict_to_snake_dict(health_check) - tags = get_tags(module, client, 'healthcheck', id) - health_check['tags'] = tags + tags = get_tags(module, client, "healthcheck", id) + health_check["tags"] = tags return health_check def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - disabled=dict(type='bool'), + state=dict(choices=["present", "absent"], default="present"), + disabled=dict(type="bool"), ip_address=dict(), - port=dict(type='int'), - type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), + port=dict(type="int"), + type=dict(choices=["HTTP", "HTTPS", "HTTP_STR_MATCH", "HTTPS_STR_MATCH", "TCP", "CALCULATED"]), + child_health_checks=dict(type="list", elements="str"), + health_threshold=dict(type="int", default=1), resource_path=dict(), fqdn=dict(), string_match=dict(), - request_interval=dict(type='int', choices=[10, 30], default=30), - failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - health_check_id=dict(type='str', aliases=['id'], required=False), - health_check_name=dict(type='str', aliases=['name'], required=False), - use_unique_names=dict(type='bool', required=False), - measure_latency=dict(type='bool', required=False), + request_interval=dict(type="int", choices=[10, 30], default=30), + failure_threshold=dict(type="int", choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + tags=dict(type="dict", 
aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + health_check_id=dict(type="str", aliases=["id"], required=False), + health_check_name=dict(type="str", aliases=["name"], required=False), + use_unique_names=dict(type="bool", required=False), + measure_latency=dict(type="bool", required=False), ) args_one_of = [ - ['ip_address', 'fqdn', 'health_check_id'], + ["ip_address", "fqdn", "health_check_id", "child_health_checks"], ] args_if = [ - ['type', 'TCP', ('port',)], + ["type", "TCP", ("port",)], + ["type", "CALCULATED", ("child_health_checks", "health_threshold")], ] args_required_together = [ - ['use_unique_names', 'health_check_name'], + ["use_unique_names", "health_check_name"], ] args_mutually_exclusive = [ - ['health_check_id', 'health_check_name'] + ["health_check_id", "health_check_name"], + ["child_health_checks", "ip_address"], + ["child_health_checks", "port"], + ["child_health_checks", "fqdn"], ] global module @@ -565,63 +608,59 @@ def main(): supports_check_mode=True, ) - if not module.params.get('health_check_id') and not module.params.get('type'): + if not module.params.get("health_check_id") and not module.params.get("type"): module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") - state_in = module.params.get('state') - ip_addr_in = module.params.get('ip_address') - port_in = module.params.get('port') - type_in = module.params.get('type') - resource_path_in = module.params.get('resource_path') - fqdn_in = module.params.get('fqdn') - string_match_in = module.params.get('string_match') - request_interval_in = module.params.get('request_interval') - failure_threshold_in = module.params.get('failure_threshold') - health_check_name = module.params.get('health_check_name') - tags = module.params.get('tags') + state_in = module.params.get("state") + ip_addr_in = module.params.get("ip_address") + port_in = module.params.get("port") + type_in = module.params.get("type") + fqdn_in = module.params.get("fqdn") + string_match_in = module.params.get("string_match") + request_interval_in = module.params.get("request_interval") + health_check_name = module.params.get("health_check_name") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + child_health_checks_in = module.params.get("child_health_checks") + health_threshold_in = module.params.get("health_threshold") # Default port if port_in is None: - if type_in in ['HTTP', 'HTTP_STR_MATCH']: + if type_in in ["HTTP", "HTTP_STR_MATCH"]: port_in = 80 - elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']: + elif type_in in ["HTTPS", "HTTPS_STR_MATCH"]: port_in = 443 if string_match_in: - if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + if type_in not in ["HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types") if len(string_match_in) > 255: module.fail_json(msg="parameter 'string_match' is limited to 255 characters max") - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) changed = False action = None check_id = None - if module.params.get('use_unique_names') or module.params.get('health_check_id'): - module.deprecate( - 'The health_check_name is currently non required parameter.' 
- ' This behavior will change and health_check_name ' - ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', - version='6.0.0', collection_name='amazon.aws') - # If update or delete Health Check based on ID update_delete_by_id = False - if module.params.get('health_check_id'): + if module.params.get("health_check_id"): update_delete_by_id = True - id_to_update_delete = module.params.get('health_check_id') + id_to_update_delete = module.params.get("health_check_id") try: - existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] + existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)["HealthCheck"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) + module.exit_json( + changed=False, msg=f"The specified health check with ID: {id_to_update_delete} does not exist" + ) else: existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) if existing_check: - check_id = existing_check.get('Id') + check_id = existing_check.get("Id") # Delete Health Check - if state_in == 'absent': + if state_in == "absent": if update_delete_by_id: changed, action = delete_health_check(id_to_update_delete) else: @@ -629,44 +668,50 @@ def main(): check_id = None # Create Health Check - elif state_in == 'present': - if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + elif state_in == "present": + if existing_check is None and not module.params.get("use_unique_names") and not update_delete_by_id: + changed, action, check_id = create_health_check( + ip_addr_in, fqdn_in, type_in, request_interval_in, port_in, child_health_checks_in, health_threshold_in + ) # Update Health Check else: # If health_check_name is a unique identifier - if module.params.get('use_unique_names'): + if module.params.get("use_unique_names"): existing_checks_with_name = get_existing_checks_with_name() + if tags is None: + purge_tags = False + tags = {} + tags["Name"] = health_check_name + # update the health_check if another health check with same name exists if health_check_name in existing_checks_with_name: - changed, action = update_health_check(existing_checks_with_name[health_check_name]) + changed, action, check_id = update_health_check(existing_checks_with_name[health_check_name]) else: # create a new health_check if another health check with same name does not exists - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - # Add tag to add name to health check - if check_id: - if not tags: - tags = {} - tags['Name'] = health_check_name + changed, action, check_id = create_health_check( + ip_addr_in, + fqdn_in, + type_in, + request_interval_in, + port_in, + child_health_checks_in, + health_threshold_in, + ) else: - if update_delete_by_id: - changed, action = update_health_check(existing_check) - else: - changed, action = update_health_check(existing_check) + changed, action, check_id = update_health_check(existing_check) if check_id: - changed |= manage_tags(module, client, 'healthcheck', check_id, - tags, module.params.get('purge_tags')) + changed |= manage_tags(module, client, "healthcheck", check_id, tags, purge_tags) health_check = 
describe_health_check(id=check_id) - health_check['action'] = action + health_check["action"] = action module.exit_json( changed=changed, health_check=health_check, ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_info.py b/ansible_collections/amazon/aws/plugins/modules/route53_info.py index 0342aef6f..909ee0ae3 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: route53_info short_description: Retrieves route53 details using AWS methods version_added: 5.0.0 @@ -130,13 +128,12 @@ options: author: - Karen Cheng (@Etherdaemon) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all hosted zones - name: List all hosted zones amazon.aws.route53_info: @@ -205,9 +202,9 @@ EXAMPLES = r''' hosted_zone_id: "{{ AWSINFO.zone_id }}" start_record_name: "host1.workshop.test.io" register: RECORDS -''' +""" -RETURN = r''' +RETURN = r""" resource_record_sets: description: A list of resource record sets returned by list_resource_record_sets in boto3. returned: when I(query=record_sets) @@ -519,18 +516,17 @@ HealthCheck: This field is deprecated and will be removed in 6.0.0 version release. type: dict returned: when I(query=health_check) and I(health_check_method=details) -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry # Split out paginator to allow for the backoff decorator to function @@ -543,8 +539,8 @@ def _paginated_result(paginator_name, **params): def get_hosted_zone(): params = dict() - if module.params.get('hosted_zone_id'): - params['Id'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["Id"] = module.params.get("hosted_zone_id") else: module.fail_json(msg="Hosted Zone Id is required") @@ -554,23 +550,28 @@ def get_hosted_zone(): def reusable_delegation_set_details(): params = dict() - if not module.params.get('delegation_set_id'): - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) + if not module.params.get("delegation_set_id"): + if module.params.get("max_items"): + params["MaxItems"] = str(module.params.get("max_items")) - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") results = 
client.list_reusable_delegation_sets(**params) else: - params['DelegationSetId'] = module.params.get('delegation_set_id') + params["DelegationSetId"] = module.params.get("delegation_set_id") results = client.get_reusable_delegation_set(**params) - results['delegation_sets'] = results['DelegationSets'] - module.deprecate("The 'CamelCase' return values with key 'DelegationSets' is deprecated and \ - will be replaced by 'snake_case' return values with key 'delegation_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + results["delegation_sets"] = results["DelegationSets"] + module.deprecate( + ( + "The 'CamelCase' return values with key 'DelegationSets' is deprecated and will be" + " replaced by 'snake_case' return values with key 'delegation_sets'. Both case values" + " are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", + ) return results @@ -579,24 +580,27 @@ def list_hosted_zones(): params = dict() # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") - if module.params.get('delegation_set_id'): - params['DelegationSetId'] = module.params.get('delegation_set_id') + if module.params.get("delegation_set_id"): + params["DelegationSetId"] = module.params.get("delegation_set_id") - zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + zones = _paginated_result("list_hosted_zones", **params)["HostedZones"] snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] - module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'hosted_zones'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'hosted_zones'. Both case" + " values are returned for now." 
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "HostedZones": zones, @@ -608,14 +612,14 @@ def list_hosted_zones(): def list_hosted_zones_by_name(): params = dict() - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["HostedZoneId"] = module.params.get("hosted_zone_id") - if module.params.get('dns_name'): - params['DNSName'] = module.params.get('dns_name') + if module.params.get("dns_name"): + params["DNSName"] = module.params.get("dns_name") - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) + if module.params.get("max_items"): + params["MaxItems"] = str(module.params.get("max_items")) return client.list_hosted_zones_by_name(**params) @@ -623,8 +627,8 @@ def list_hosted_zones_by_name(): def change_details(): params = dict() - if module.params.get('change_id'): - params['Id'] = module.params.get('change_id') + if module.params.get("change_id"): + params["Id"] = module.params.get("change_id") else: module.fail_json(msg="change_id is required") @@ -634,17 +638,22 @@ def change_details(): def checker_ip_range_details(): results = client.get_checker_ip_ranges() - results['checker_ip_ranges'] = results['CheckerIpRanges'] - module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and \ - will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + results["checker_ip_ranges"] = results["CheckerIpRanges"] + module.deprecate( + ( + "The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and will be" + " replaced by 'snake_case' return values with key 'checker_ip_ranges'. Both case values" + " are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", + ) return results def get_count(): - if module.params.get('query') == 'health_check': + if module.params.get("query") == "health_check": results = client.get_health_check_count() else: results = client.get_hosted_zone_count() @@ -656,29 +665,31 @@ def get_health_check(): params = dict() results = dict() - if not module.params.get('health_check_id'): + if not module.params.get("health_check_id"): module.fail_json(msg="health_check_id is required") else: - params['HealthCheckId'] = module.params.get('health_check_id') + params["HealthCheckId"] = module.params.get("health_check_id") - if module.params.get('health_check_method') == 'details': + if module.params.get("health_check_method") == "details": results = client.get_health_check(**params) results["health_check"] = camel_dict_to_snake_dict(results["HealthCheck"]) module.deprecate( - "The 'CamelCase' return values with key 'HealthCheck' is deprecated \ - and will be replaced by 'snake_case' return values with key 'health_check'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'HealthCheck' is deprecated and will be" + " replaced by 'snake_case' return values with key 'health_check'. Both case values are" + " returned for now." 
+ ), date="2025-01-01", collection_name="amazon.aws", ) - elif module.params.get('health_check_method') == 'failure_reason': + elif module.params.get("health_check_method") == "failure_reason": response = client.get_health_check_last_failure_reason(**params) results["health_check_observations"] = [ camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] ] - elif module.params.get('health_check_method') == 'status': + elif module.params.get("health_check_method") == "status": response = client.get_health_check_status(**params) results["health_check_observations"] = [ camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] @@ -690,15 +701,15 @@ def get_health_check(): def get_resource_tags(): params = dict() - if module.params.get('resource_id'): - params['ResourceIds'] = module.params.get('resource_id') + if module.params.get("resource_id"): + params["ResourceIds"] = module.params.get("resource_id") else: module.fail_json(msg="resource_id or resource_ids is required") - if module.params.get('query') == 'health_check': - params['ResourceType'] = 'healthcheck' + if module.params.get("query") == "health_check": + params["ResourceType"] = "healthcheck" else: - params['ResourceType'] = 'hostedzone' + params["ResourceType"] = "hostedzone" return client.list_tags_for_resources(**params) @@ -706,22 +717,25 @@ def get_resource_tags(): def list_health_checks(): params = dict() - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + health_checks = _paginated_result("list_health_checks", **params)["HealthChecks"] snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] - module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'health_checks'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'health_checks'. Both case" + " values are returned for now." 
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "HealthChecks": health_checks, @@ -733,34 +747,37 @@ def list_health_checks(): def record_sets_details(): params = dict() - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["HostedZoneId"] = module.params.get("hosted_zone_id") else: module.fail_json(msg="Hosted Zone Id is required") - if module.params.get('start_record_name'): - params['StartRecordName'] = module.params.get('start_record_name') + if module.params.get("start_record_name"): + params["StartRecordName"] = module.params.get("start_record_name") # Check that both params are set if type is applied - if module.params.get('type') and not module.params.get('start_record_name'): + if module.params.get("type") and not module.params.get("start_record_name"): module.fail_json(msg="start_record_name must be specified if type is set") - if module.params.get('type'): - params['StartRecordType'] = module.params.get('type') + if module.params.get("type"): + params["StartRecordType"] = module.params.get("type") # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] + record_sets = _paginated_result("list_resource_record_sets", **params)["ResourceRecordSets"] snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] - module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'resource_record_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'resource_record_sets'." + " Both case values are returned for now." 
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "ResourceRecordSets": record_sets, @@ -771,28 +788,28 @@ def record_sets_details(): def health_check_details(): health_check_invocations = { - 'list': list_health_checks, - 'details': get_health_check, - 'status': get_health_check, - 'failure_reason': get_health_check, - 'count': get_count, - 'tags': get_resource_tags, + "list": list_health_checks, + "details": get_health_check, + "status": get_health_check, + "failure_reason": get_health_check, + "count": get_count, + "tags": get_resource_tags, } - results = health_check_invocations[module.params.get('health_check_method')]() + results = health_check_invocations[module.params.get("health_check_method")]() return results def hosted_zone_details(): hosted_zone_invocations = { - 'details': get_hosted_zone, - 'list': list_hosted_zones, - 'list_by_name': list_hosted_zones_by_name, - 'count': get_count, - 'tags': get_resource_tags, + "details": get_hosted_zone, + "list": list_hosted_zones, + "list_by_name": list_hosted_zones_by_name, + "count": get_count, + "tags": get_resource_tags, } - results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() + results = hosted_zone_invocations[module.params.get("hosted_zone_method")]() return results @@ -801,74 +818,75 @@ def main(): global client argument_spec = dict( - query=dict(choices=[ - 'change', - 'checker_ip_range', - 'health_check', - 'hosted_zone', - 'record_sets', - 'reusable_delegation_set', - ], required=True), + query=dict( + choices=[ + "change", + "checker_ip_range", + "health_check", + "hosted_zone", + "record_sets", + "reusable_delegation_set", + ], + required=True, + ), change_id=dict(), hosted_zone_id=dict(), - max_items=dict(type='int'), + max_items=dict(type="int"), next_marker=dict(), delegation_set_id=dict(), start_record_name=dict(), - type=dict(type='str', choices=[ - 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' - ]), + type=dict( + type="str", + choices=["A", "CNAME", "MX", "AAAA", "TXT", "PTR", "SRV", "SPF", "CAA", "NS", "NAPTR", "SOA", "DS"], + ), dns_name=dict(), - resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), + resource_id=dict(type="list", aliases=["resource_ids"], elements="str"), health_check_id=dict(), - hosted_zone_method=dict(choices=[ - 'details', - 'list', - 'list_by_name', - 'count', - 'tags' - ], default='list'), - health_check_method=dict(choices=[ - 'list', - 'details', - 'status', - 'failure_reason', - 'count', - 'tags', - ], default='list'), + hosted_zone_method=dict(choices=["details", "list", "list_by_name", "count", "tags"], default="list"), + health_check_method=dict( + choices=[ + "list", + "details", + "status", + "failure_reason", + "count", + "tags", + ], + default="list", + ), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['hosted_zone_method', 'health_check_method'], + ["hosted_zone_method", "health_check_method"], ], check_boto3=False, ) try: - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") invocations = { - 'change': change_details, - 'checker_ip_range': checker_ip_range_details, - 'health_check': health_check_details, - 
'hosted_zone': hosted_zone_details, - 'record_sets': record_sets_details, - 'reusable_delegation_set': reusable_delegation_set_details, + "change": change_details, + "checker_ip_range": checker_ip_range_details, + "health_check": health_check_details, + "hosted_zone": hosted_zone_details, + "record_sets": record_sets_details, + "reusable_delegation_set": reusable_delegation_set_details, } results = dict(changed=False) try: - results = invocations[module.params.get('query')]() + results = invocations[module.params.get("query")]() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Query failed") module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py index ac549ba56..5bc982d19 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: route53_zone short_description: add or delete Route53 zones version_added: 5.0.0 @@ -65,17 +64,17 @@ options: - Note that you can't associate a reusable delegation set with a private hosted zone. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. 
author: - "Christopher Troup (@minichate)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a public zone amazon.aws.route53_zone: zone: example.com @@ -114,18 +113,18 @@ EXAMPLES = r''' zone: example.com comment: this is an example tags: - Owner: Ansible Team + Owner: Ansible Team - name: modify a public zone, removing all previous tags and adding a new one amazon.aws.route53_zone: zone: example.com comment: this is an example tags: - Support: Ansible Community + Support: Ansible Community purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" comment: description: optional hosted zone comment returned: when hosted zone exists @@ -183,23 +182,25 @@ tags: description: tags associated with the zone returned: when tags are defined type: dict -''' +""" import time -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule @AWSRetry.jittered_backoff() def _list_zones(): - paginator = client.get_paginator('list_hosted_zones') + paginator = client.get_paginator("list_hosted_zones") return paginator.paginate().build_full_result() @@ -209,41 +210,42 @@ def find_zones(zone_in, private_zone): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not list current hosted zones") zones = [] - for r53zone in results['HostedZones']: - if r53zone['Name'] != zone_in: + for r53zone in results["HostedZones"]: + if r53zone["Name"] != zone_in: continue # only save zone names that match the public/private setting - if (r53zone['Config']['PrivateZone'] and private_zone) or \ - (not r53zone['Config']['PrivateZone'] and not private_zone): + if (r53zone["Config"]["PrivateZone"] and private_zone) or ( + not r53zone["Config"]["PrivateZone"] and not private_zone + ): zones.append(r53zone) return zones def create(matching_zones): - zone_in = module.params.get('zone').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None) - comment = module.params.get('comment') - delegation_set_id = module.params.get('delegation_set_id') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if not zone_in.endswith('.'): + zone_in = module.params.get("zone").lower() + vpc_id = module.params.get("vpc_id") + vpc_region = module.params.get("vpc_region") + vpcs = module.params.get("vpcs") or ([{"id": vpc_id, "region": vpc_region}] if vpc_id and vpc_region else None) + comment = module.params.get("comment") + delegation_set_id = module.params.get("delegation_set_id") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + if not zone_in.endswith("."): zone_in += "." 
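create() normalizes the zone to its absolute form (trailing dot) and folds the legacy vpc_id/vpc_region pair into a one-element vpcs list, so a multi-VPC private zone can be requested either way. A minimal sketch of the list form, with placeholder VPC IDs and regions:

- name: Create a private zone associated with two VPCs
  amazon.aws.route53_zone:
    zone: internal.example.com
    comment: multi-VPC private zone
    vpcs:
      - id: vpc-aaaaaaaa
        region: us-east-1
      - id: vpc-bbbbbbbb
        region: us-west-2

As the private-zone path below shows, the first VPC is passed to create_hosted_zone and any remaining entries are attached with associate_vpc_with_hosted_zone.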
private_zone = bool(vpcs) record = { - 'private_zone': private_zone, - 'vpc_id': vpcs and vpcs[0]['id'], # The first one for backwards compatibility - 'vpc_region': vpcs and vpcs[0]['region'], # The first one for backwards compatibility - 'vpcs': vpcs, - 'comment': comment, - 'name': zone_in, - 'delegation_set_id': delegation_set_id, - 'zone_id': None, + "private_zone": private_zone, + "vpc_id": vpcs and vpcs[0]["id"], # The first one for backwards compatibility + "vpc_region": vpcs and vpcs[0]["region"], # The first one for backwards compatibility + "vpcs": vpcs, + "comment": comment, + "name": zone_in, + "delegation_set_id": delegation_set_id, + "zone_id": None, } if private_zone: @@ -251,13 +253,13 @@ def create(matching_zones): else: changed, result = create_or_update_public(matching_zones, record) - zone_id = result.get('zone_id') + zone_id = result.get("zone_id") if zone_id: if tags is not None: - changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags) - result['tags'] = get_tags(module, client, 'hostedzone', zone_id) + changed |= manage_tags(module, client, "hostedzone", zone_id, tags, purge_tags) + result["tags"] = get_tags(module, client, "hostedzone", zone_id) else: - result['tags'] = tags + result["tags"] = tags return changed, result @@ -265,70 +267,73 @@ def create(matching_zones): def create_or_update_private(matching_zones, record): for z in matching_zones: try: - result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids + result = client.get_hosted_zone(Id=z["Id"]) # could be in different regions or have different VPCids except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") + zone_details = result["HostedZone"] + vpc_details = result["VPCs"] current_vpc_ids = None current_vpc_regions = None matching = False - if isinstance(vpc_details, dict) and len(record['vpcs']) == 1: - if vpc_details['VPC']['VPCId'] == record['vpcs'][0]['id']: - current_vpc_ids = [vpc_details['VPC']['VPCId']] - current_vpc_regions = [vpc_details['VPC']['VPCRegion']] + if isinstance(vpc_details, dict) and len(record["vpcs"]) == 1: + if vpc_details["VPC"]["VPCId"] == record["vpcs"][0]["id"]: + current_vpc_ids = [vpc_details["VPC"]["VPCId"]] + current_vpc_regions = [vpc_details["VPC"]["VPCRegion"]] matching = True else: # Sort the lists and compare them to make sure they contain the same items - if (sorted([vpc['id'] for vpc in record['vpcs']]) == sorted([v['VPCId'] for v in vpc_details]) - and sorted([vpc['region'] for vpc in record['vpcs']]) == sorted([v['VPCRegion'] for v in vpc_details])): - current_vpc_ids = [vpc['id'] for vpc in record['vpcs']] - current_vpc_regions = [vpc['region'] for vpc in record['vpcs']] + if sorted([vpc["id"] for vpc in record["vpcs"]]) == sorted([v["VPCId"] for v in vpc_details]) and sorted( + [vpc["region"] for vpc in record["vpcs"]] + ) == sorted([v["VPCRegion"] for v in vpc_details]): + current_vpc_ids = [vpc["id"] for vpc in record["vpcs"]] + current_vpc_regions = [vpc["region"] for vpc in record["vpcs"]] matching = True if matching: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") + if "Comment" 
in zone_details["Config"] and zone_details["Config"]["Comment"] != record["comment"]: if not module.check_mode: try: - client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment']) + client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") return True, record else: - record['msg'] = "There is already a private hosted zone in the same region with the same VPC(s) \ - you chose. Unable to create a new private hosted zone in the same name space." + record["msg"] = ( + "There is already a private hosted zone in the same region with the same VPC(s)" + " you chose. Unable to create a new private hosted zone in the same name space." + ) return False, record if not module.check_mode: try: result = client.create_hosted_zone( - Name=record['name'], + Name=record["name"], HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': True, + "Comment": record["comment"] if record["comment"] is not None else "", + "PrivateZone": True, }, VPC={ - 'VPCRegion': record['vpcs'][0]['region'], - 'VPCId': record['vpcs'][0]['id'], + "VPCRegion": record["vpcs"][0]["region"], + "VPCId": record["vpcs"][0]["id"], }, - CallerReference="%s-%s" % (record['name'], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not create hosted zone") - hosted_zone = result['HostedZone'] - zone_id = hosted_zone['Id'].replace('/hostedzone/', '') - record['zone_id'] = zone_id + hosted_zone = result["HostedZone"] + zone_id = hosted_zone["Id"].replace("/hostedzone/", "") + record["zone_id"] = zone_id - if len(record['vpcs']) > 1: - for vpc in record['vpcs'][1:]: + if len(record["vpcs"]) > 1: + for vpc in record["vpcs"][1:]: try: result = client.associate_vpc_with_hosted_zone( HostedZoneId=zone_id, VPC={ - 'VPCRegion': vpc['region'], - 'VPCId': vpc['id'], + "VPCRegion": vpc["region"], + "VPCId": vpc["id"], }, ) except (BotoCoreError, ClientError) as e: @@ -342,20 +347,17 @@ def create_or_update_public(matching_zones, record): zone_details, zone_delegation_set_details = None, {} for matching_zone in matching_zones: try: - zone = client.get_hosted_zone(Id=matching_zone['Id']) - zone_details = zone['HostedZone'] - zone_delegation_set_details = zone.get('DelegationSet', {}) + zone = client.get_hosted_zone(Id=matching_zone["Id"]) + zone_details = zone["HostedZone"] + zone_delegation_set_details = zone.get("DelegationSet", {}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id']) - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {matching_zone['Id']}") + if "Comment" in zone_details["Config"] and zone_details["Config"]["Comment"] != record["comment"]: if not module.check_mode: try: - client.update_hosted_zone_comment( - Id=zone_details['Id'], - Comment=record['comment'] - ) + client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + 
module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") changed = True else: changed = False @@ -365,20 +367,20 @@ def create_or_update_public(matching_zones, record): if not module.check_mode: try: params = dict( - Name=record['name'], + Name=record["name"], HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': False, + "Comment": record["comment"] if record["comment"] is not None else "", + "PrivateZone": False, }, - CallerReference="%s-%s" % (record['name'], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) - if record.get('delegation_set_id') is not None: - params['DelegationSetId'] = record['delegation_set_id'] + if record.get("delegation_set_id") is not None: + params["DelegationSetId"] = record["delegation_set_id"] result = client.create_hosted_zone(**params) - zone_details = result['HostedZone'] - zone_delegation_set_details = result.get('DelegationSet', {}) + zone_details = result["HostedZone"] + zone_delegation_set_details = result.get("DelegationSet", {}) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not create hosted zone") @@ -386,11 +388,11 @@ def create_or_update_public(matching_zones, record): if module.check_mode: if zone_details: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") else: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - record['name'] = zone_details['Name'] - record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") + record["name"] = zone_details["Name"] + record["delegation_set_id"] = zone_delegation_set_details.get("Id", "").replace("/delegationset/", "") return changed, record @@ -398,29 +400,30 @@ def create_or_update_public(matching_zones, record): def delete_private(matching_zones, vpcs): for z in matching_zones: try: - result = client.get_hosted_zone(Id=z['Id']) + result = client.get_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") + zone_details = result["HostedZone"] + vpc_details = result["VPCs"] if isinstance(vpc_details, dict): - if vpc_details['VPC']['VPCId'] == vpcs[0]['id'] and vpcs[0]['region'] == vpc_details['VPC']['VPCRegion']: + if vpc_details["VPC"]["VPCId"] == vpcs[0]["id"] and vpcs[0]["region"] == vpc_details["VPC"]["VPCRegion"]: if not module.check_mode: try: - client.delete_hosted_zone(Id=z['Id']) + client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - return True, "Successfully deleted %s" % zone_details['Name'] + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") + return True, f"Successfully deleted {zone_details['Name']}" else: # Sort the lists and compare them to make sure they contain the same items - if (sorted([vpc['id'] for vpc in vpcs]) == sorted([v['VPCId'] for v in vpc_details]) - and sorted([vpc['region'] for vpc in vpcs]) == sorted([v['VPCRegion'] for v in vpc_details])): + if sorted([vpc["id"] for vpc in vpcs]) == sorted([v["VPCId"] for v in vpc_details]) and sorted( + [vpc["region"] for 
vpc in vpcs] + ) == sorted([v["VPCRegion"] for v in vpc_details]): if not module.check_mode: try: - client.delete_hosted_zone(Id=z['Id']) + client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - return True, "Successfully deleted %s" % zone_details['Name'] + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") + return True, f"Successfully deleted {zone_details['Name']}" return False, "The VPCs do not match a private hosted zone." @@ -432,11 +435,11 @@ def delete_public(matching_zones): else: if not module.check_mode: try: - client.delete_hosted_zone(Id=matching_zones[0]['Id']) + client.delete_hosted_zone(Id=matching_zones[0]["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id']) + module.fail_json_aws(e, msg=f"Could not get delete hosted zone {matching_zones[0]['Id']}") changed = True - msg = "Successfully deleted %s" % matching_zones[0]['Id'] + msg = f"Successfully deleted {matching_zones[0]['Id']}" return changed, msg @@ -444,41 +447,41 @@ def delete_hosted_id(hosted_zone_id, matching_zones): if hosted_zone_id == "all": deleted = [] for z in matching_zones: - deleted.append(z['Id']) + deleted.append(z["Id"]) if not module.check_mode: try: - client.delete_hosted_zone(Id=z['Id']) + client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") changed = True - msg = "Successfully deleted zones: %s" % deleted - elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]: + msg = f"Successfully deleted zones: {deleted}" + elif hosted_zone_id in [zo["Id"].replace("/hostedzone/", "") for zo in matching_zones]: if not module.check_mode: try: client.delete_hosted_zone(Id=hosted_zone_id) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id) + module.fail_json_aws(e, msg=f"Could not delete hosted zone {hosted_zone_id}") changed = True - msg = "Successfully deleted zone: %s" % hosted_zone_id + msg = f"Successfully deleted zone: {hosted_zone_id}" else: changed = False - msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id + msg = f"There is no zone to delete that matches hosted_zone_id {hosted_zone_id}." return changed, msg def delete(matching_zones): - zone_in = module.params.get('zone').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None) - hosted_zone_id = module.params.get('hosted_zone_id') + zone_in = module.params.get("zone").lower() + vpc_id = module.params.get("vpc_id") + vpc_region = module.params.get("vpc_region") + vpcs = module.params.get("vpcs") or ([{"id": vpc_id, "region": vpc_region}] if vpc_id and vpc_region else None) + hosted_zone_id = module.params.get("hosted_zone_id") - if not zone_in.endswith('.'): + if not zone_in.endswith("."): zone_in += "." 
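delete_hosted_id() above accepts either a concrete zone ID or the literal string "all", which removes every hosted zone whose name matched. A sketch of both forms, assuming a placeholder zone ID:

- name: Delete one specific zone when several share the same name
  amazon.aws.route53_zone:
    zone: example.com
    hosted_zone_id: Z2AAAAAAAAAAAA
    state: absent

- name: Delete every matching zone named example.com
  amazon.aws.route53_zone:
    zone: example.com
    hosted_zone_id: all
    state: absent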
private_zone = bool(vpcs) - if zone_in in [z['Name'] for z in matching_zones]: + if zone_in in [z["Name"] for z in matching_zones]: if hosted_zone_id: changed, result = delete_hosted_id(hosted_zone_id, matching_zones) else: @@ -499,26 +502,25 @@ def main(): argument_spec = dict( zone=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), vpc_id=dict(default=None), vpc_region=dict(default=None), - vpcs=dict(type='list', default=None, elements='dict', options=dict( - id=dict(required=True), - region=dict(required=True) - )), - comment=dict(default=''), + vpcs=dict( + type="list", default=None, elements="dict", options=dict(id=dict(required=True), region=dict(required=True)) + ), + comment=dict(default=""), hosted_zone_id=dict(), delegation_set_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) mutually_exclusive = [ - ['delegation_set_id', 'vpc_id'], - ['delegation_set_id', 'vpc_region'], - ['delegation_set_id', 'vpcs'], - ['vpcs', 'vpc_id'], - ['vpcs', 'vpc_region'], + ["delegation_set_id", "vpc_id"], + ["delegation_set_id", "vpc_region"], + ["delegation_set_id", "vpcs"], + ["vpcs", "vpc_id"], + ["vpcs", "vpc_region"], ] module = AnsibleAWSModule( @@ -527,23 +529,23 @@ def main(): supports_check_mode=True, ) - zone_in = module.params.get('zone').lower() - state = module.params.get('state').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - vpcs = module.params.get('vpcs') + zone_in = module.params.get("zone").lower() + state = module.params.get("state").lower() + vpc_id = module.params.get("vpc_id") + vpc_region = module.params.get("vpc_region") + vpcs = module.params.get("vpcs") - if not zone_in.endswith('.'): + if not zone_in.endswith("."): zone_in += "." private_zone = bool(vpcs or (vpc_id and vpc_region)) - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) zones = find_zones(zone_in, private_zone) - if state == 'present': + if state == "present": changed, result = create(matching_zones=zones) - elif state == 'absent': + elif state == "absent": changed, result = delete(matching_zones=zones) if isinstance(result, dict): @@ -552,5 +554,5 @@ def main(): module.exit_json(changed=changed, result=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py index a4e2a8f56..d68223ede 100644 --- a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py +++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py @@ -1,23 +1,10 @@ #!/usr/bin/python -# -# This is a free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This Ansible library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this library. 
If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: s3_bucket version_added: 1.0.0 @@ -78,8 +65,10 @@ options: choices: [ 'none', 'AES256', 'aws:kms' ] type: str encryption_key_id: - description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If - not specified then it will default to the AWS provided KMS key. + description: + - KMS master key ID to use for the default encryption. + - If not specified then it will default to the AWS provided KMS key. + - This parameter is only supported if I(encryption) is C(aws:kms). type: str bucket_key_enabled: description: @@ -170,10 +159,17 @@ options: type: bool version_added: 3.1.0 default: True + dualstack: + description: + - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Mutually exclusive with I(endpoint_url). + type: bool + default: false + version_added: 6.0.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -188,9 +184,9 @@ notes: - Support for the C(S3_URL) environment variable has been deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create a simple S3 bucket @@ -255,11 +251,11 @@ EXAMPLES = r''' name: mys3bucket state: present public_access: - block_public_acls: true - ignore_public_acls: true - ## keys == 'false' can be omitted, undefined keys defaults to 'false' - # block_public_policy: false - # restrict_public_buckets: false + block_public_acls: true + ignore_public_acls: true + ## keys == 'false' can be omitted, undefined keys defaults to 'false' + # block_public_policy: false + # restrict_public_buckets: false # Delete public policy block from bucket - amazon.aws.s3_bucket: @@ -290,9 +286,9 @@ EXAMPLES = r''' name: mys3bucket state: present acl: public-read -''' +""" -RETURN = r''' +RETURN = r""" encryption: description: - Server-side encryption of the objects in the S3 bucket. 
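The new dualstack option documented above reaches boto3 as endpoint configuration via the s3_extra_params helper. As a hedged illustration of the underlying botocore knob (not the module's exact wiring), enabling dual-stack S3 endpoints on a plain boto3 client looks roughly like this:

    import boto3
    from botocore.config import Config

    # Dual-stack endpoints resolve to hostnames that publish both A (IPv4)
    # and AAAA (IPv6) records.
    client = boto3.client(
        "s3",
        region_name="us-east-1",
        config=Config(s3={"use_dualstack_endpoint": True}),
    )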
@@ -352,10 +348,9 @@ acl: type: dict returned: I(state=present) sample: 'public-read' -''' +""" import json -import os import time try: @@ -364,23 +359,20 @@ except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.basic import to_text +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlparse - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params +from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -def create_or_update_bucket(s3_client, module, location): +def create_or_update_bucket(s3_client, module): policy = module.params.get("policy") name = module.params.get("name") requester_pays = module.params.get("requester_pays") @@ -396,41 +388,52 @@ def create_or_update_bucket(s3_client, module, location): object_ownership = module.params.get("object_ownership") object_lock_enabled = module.params.get("object_lock_enabled") acl = module.params.get("acl") + # default to US Standard region, + # note: module.region will also try to pull a default out of the boto3 configs. 
+ location = module.region or "us-east-1" + changed = False result = {} try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") if not bucket_is_present: try: bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled) - s3_client.get_waiter('bucket_exists').wait(Bucket=name) + s3_client.get_waiter("bucket_exists").wait(Bucket=name) changed = changed or bucket_changed except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') + module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed while creating bucket") # Versioning try: versioning_status = get_bucket_versioning(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if versioning is not None: + module.fail_json_aws(e, msg="Bucket versioning is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if versioning is not None: module.fail_json_aws(e, msg="Failed to get bucket versioning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket versioning") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket versioning") else: if versioning is not None: required_versioning = None - if versioning and versioning_status.get('Status') != "Enabled": - required_versioning = 'Enabled' - elif not versioning and versioning_status.get('Status') == "Enabled": - required_versioning = 'Suspended' + if versioning and versioning_status.get("Status") != "Enabled": + required_versioning = "Enabled" + elif not versioning and versioning_status.get("Status") == "Enabled": + required_versioning = "Suspended" if required_versioning: try: @@ -442,22 +445,29 @@ def create_or_update_bucket(s3_client, module, location): versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) # This output format is there to ensure compatibility with previous versions of the module - result['versioning'] = { - 'Versioning': versioning_status.get('Status', 'Disabled'), - 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), + result["versioning"] = { + "Versioning": versioning_status.get("Status", "Disabled"), + "MfaDelete": versioning_status.get("MFADelete", "Disabled"), } # Requester pays try: requester_pays_status = get_bucket_request_payment(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if requester_pays is not None: + module.fail_json_aws(e, msg="Bucket request payment is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if requester_pays is not None: module.fail_json_aws(e, msg="Failed to get bucket 
request payment") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket request payment") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket request payment") else: if requester_pays is not None: - payer = 'Requester' if requester_pays else 'BucketOwner' + payer = "Requester" if requester_pays else "BucketOwner" if requester_pays_status != payer: put_bucket_request_payment(s3_client, name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) @@ -468,7 +478,7 @@ def create_or_update_bucket(s3_client, module, location): requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) changed = True - result['requester_pays'] = requester_pays + result["requester_pays"] = requester_pays # Public access clock configuration current_public_access = {} @@ -510,10 +520,17 @@ def create_or_update_bucket(s3_client, module, location): # Policy try: current_policy = get_bucket_policy(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if policy is not None: + module.fail_json_aws(e, msg="Bucket policy is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if policy is not None: module.fail_json_aws(e, msg="Failed to get bucket policy") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket policy") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket policy") else: if policy is not None: @@ -540,15 +557,22 @@ def create_or_update_bucket(s3_client, module, location): current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True) changed = True - result['policy'] = current_policy + result["policy"] = current_policy # Tags try: current_tags_dict = get_current_bucket_tags_dict(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if tags is not None: + module.fail_json_aws(e, msg="Bucket tagging is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if tags is not None: module.fail_json_aws(e, msg="Failed to get bucket tags") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket tags") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket tags") else: if tags is not None: @@ -574,21 +598,28 @@ def create_or_update_bucket(s3_client, module, location): current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) changed = True - result['tags'] = current_tags_dict + result["tags"] = current_tags_dict # Encryption try: current_encryption = get_bucket_encryption(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except 
is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if encryption is not None: + module.fail_json_aws(e, msg="Bucket encryption is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if encryption is not None: module.fail_json_aws(e, msg="Failed to get bucket encryption settings") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket encryption settings") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket encryption settings") else: if encryption is not None: - current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None - current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None - if encryption == 'none': + current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None + current_encryption_key = current_encryption.get("KMSMasterKeyID") if current_encryption else None + if encryption == "none": if current_encryption_algorithm is not None: try: delete_bucket_encryption(s3_client, name) @@ -597,16 +628,18 @@ def create_or_update_bucket(s3_client, module, location): current_encryption = wait_encryption_is_applied(module, s3_client, name, None) changed = True else: - if (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id): - expected_encryption = {'SSEAlgorithm': encryption} - if encryption == 'aws:kms' and encryption_key_id is not None: - expected_encryption.update({'KMSMasterKeyID': encryption_key_id}) + if (encryption != current_encryption_algorithm) or ( + encryption == "aws:kms" and current_encryption_key != encryption_key_id + ): + expected_encryption = {"SSEAlgorithm": encryption} + if encryption == "aws:kms" and encryption_key_id is not None: + expected_encryption.update({"KMSMasterKeyID": encryption_key_id}) current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption) changed = True if bucket_key_enabled is not None: - current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None - if current_encryption_algorithm == 'aws:kms': + current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None + if current_encryption_algorithm == "aws:kms": if get_bucket_key(s3_client, name) != bucket_key_enabled: if bucket_key_enabled: expected_encryption = True @@ -614,22 +647,29 @@ def create_or_update_bucket(s3_client, module, location): expected_encryption = False current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption) changed = True - result['encryption'] = current_encryption + result["encryption"] = current_encryption # -- Bucket ownership try: bucket_ownership = get_bucket_ownership_cntrl(s3_client, name) - result['object_ownership'] = bucket_ownership + result["object_ownership"] = bucket_ownership except KeyError as e: # Some non-AWS providers appear to return policy documents that aren't # compatible with AWS, cleanly catch KeyError so users can continue to use # other features. 
if delete_object_ownership or object_ownership is not None: module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if delete_object_ownership or object_ownership is not None: + module.fail_json_aws(e, msg="Bucket object ownership is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if delete_object_ownership or object_ownership is not None: module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket object ownership settings") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") else: if delete_object_ownership: @@ -637,30 +677,33 @@ def create_or_update_bucket(s3_client, module, location): if bucket_ownership is not None: delete_bucket_ownership(s3_client, name) changed = True - result['object_ownership'] = None + result["object_ownership"] = None elif object_ownership is not None: # update S3 bucket ownership if bucket_ownership != object_ownership: put_bucket_ownership(s3_client, name, object_ownership) changed = True - result['object_ownership'] = object_ownership + result["object_ownership"] = object_ownership # -- Bucket ACL if acl: try: s3_client.put_bucket_acl(Bucket=name, ACL=acl) - result['acl'] = acl + result["acl"] = acl changed = True except KeyError as e: # Some non-AWS providers appear to return policy documents that aren't # compatible with AWS, cleanly catch KeyError so users can continue to use # other features. 
module.fail_json_aws(e, msg="Failed to get bucket acl block") - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: - module.fail_json_aws(e, msg="Failed to update bucket ACL") - except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + module.fail_json_aws(e, msg="Bucket ACLs ar not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Access denied trying to update bucket ACL") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to update bucket ACL") # -- Object Lock @@ -697,7 +740,7 @@ def bucket_exists(s3_client, bucket_name): try: s3_client.head_bucket(Bucket=bucket_name) bucket_exists = True - except is_boto3_error_code('404'): + except is_boto3_error_code("404"): bucket_exists = False return bucket_exists @@ -708,8 +751,8 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False): params = {"Bucket": bucket_name} configuration = {} - if location not in ('us-east-1', None): - configuration['LocationConstraint'] = location + if location not in ("us-east-1", None): + configuration["LocationConstraint"] = location if configuration: params["CreateBucketConfiguration"] = configuration @@ -720,58 +763,58 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False): s3_client.create_bucket(**params) return True - except is_boto3_error_code('BucketAlreadyOwnedByYou'): + except is_boto3_error_code("BucketAlreadyOwnedByYou"): # We should never get here since we check the bucket presence before calling the create_or_update_bucket # method. 
However, the AWS Api sometimes fails to report bucket presence, so we catch this exception return False -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_tagging(s3_client, bucket_name, tags): - s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)}) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_policy(s3_client, bucket_name, policy): s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy)) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_policy(s3_client, bucket_name): s3_client.delete_bucket_policy(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_policy(s3_client, bucket_name): try: - current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy') + current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get("Policy") if not current_policy_string: return None current_policy = json.loads(current_policy_string) - except is_boto3_error_code('NoSuchBucketPolicy'): + except is_boto3_error_code("NoSuchBucketPolicy"): return None return current_policy -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_request_payment(s3_client, bucket_name, payer): - s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer}) + s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={"Payer": payer}) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_request_payment(s3_client, bucket_name): - return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer') + return s3_client.get_bucket_request_payment(Bucket=bucket_name).get("Payer") -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_versioning(s3_client, bucket_name): return s3_client.get_bucket_versioning(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_versioning(s3_client, bucket_name, required_versioning): - s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 
required_versioning}) + s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": required_versioning}) @AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) @@ -780,23 +823,27 @@ def get_bucket_object_lock_enabled(s3_client, bucket_name): return object_lock_configuration["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled" -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_encryption(s3_client, bucket_name): try: result = s3_client.get_bucket_encryption(Bucket=bucket_name) - return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault') - except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'): + return ( + result.get("ServerSideEncryptionConfiguration", {}) + .get("Rules", [])[0] + .get("ApplyServerSideEncryptionByDefault") + ) + except is_boto3_error_code("ServerSideEncryptionConfigurationNotFoundError"): return None except (IndexError, KeyError): return None -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_key(s3_client, bucket_name): try: result = s3_client.get_bucket_encryption(Bucket=bucket_name) - return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('BucketKeyEnabled') - except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'): + return result.get("ServerSideEncryptionConfiguration", {}).get("Rules", [])[0].get("BucketKeyEnabled") + except is_boto3_error_code("ServerSideEncryptionConfigurationNotFoundError"): return None except (IndexError, KeyError): return None @@ -807,24 +854,34 @@ def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryptio for retries in range(1, max_retries + 1): try: put_bucket_encryption(s3_client, name, expected_encryption) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to set bucket encryption") - current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption, - should_fail=(retries == max_retries), retries=5) + current_encryption = wait_encryption_is_applied( + module, s3_client, name, expected_encryption, should_fail=(retries == max_retries), retries=5 + ) if current_encryption == expected_encryption: return current_encryption # We shouldn't get here, the only time this should happen is if # current_encryption != expected_encryption and retries == max_retries # Which should use module.fail_json and fail out first. 
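put_bucket_encryption_with_retry above, and the bucket-key variant below, wrap their put calls in an outer loop because S3 bucket configuration is read-after-write eventually consistent: a successful put does not guarantee that the next get reflects the change yet. A minimal sketch of the poll-until-applied idiom used throughout this module (get_state and expected are placeholders):

    import time

    def wait_until_applied(get_state, expected, retries=12, delay=5):
        # Poll an eventually consistent setting until it matches, or give up.
        for _ in range(retries):
            if get_state() == expected:
                return True
            time.sleep(delay)
        return False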
- module.fail_json(msg='Failed to apply bucket encryption', - current=current_encryption, expected=expected_encryption, retries=retries) + module.fail_json( + msg="Failed to apply bucket encryption", + current=current_encryption, + expected=expected_encryption, + retries=retries, + ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_encryption(s3_client, bucket_name, encryption): - server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]} - s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration) + server_side_encryption_configuration = {"Rules": [{"ApplyServerSideEncryptionByDefault": encryption}]} + s3_client.put_bucket_encryption( + Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration + ) def put_bucket_key_with_retry(module, s3_client, name, expected_encryption): @@ -832,86 +889,87 @@ def put_bucket_key_with_retry(module, s3_client, name, expected_encryption): for retries in range(1, max_retries + 1): try: put_bucket_key(s3_client, name, expected_encryption) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to set bucket Key") - current_encryption = wait_bucket_key_is_applied(module, s3_client, name, expected_encryption, - should_fail=(retries == max_retries), retries=5) + current_encryption = wait_bucket_key_is_applied( + module, s3_client, name, expected_encryption, should_fail=(retries == max_retries), retries=5 + ) if current_encryption == expected_encryption: return current_encryption # We shouldn't get here, the only time this should happen is if # current_encryption != expected_encryption and retries == max_retries # Which should use module.fail_json and fail out first. 
- module.fail_json(msg='Failed to set bucket key', - current=current_encryption, expected=expected_encryption, retries=retries) + module.fail_json( + msg="Failed to set bucket key", current=current_encryption, expected=expected_encryption, retries=retries + ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_key(s3_client, bucket_name, encryption): # server_side_encryption_configuration ={'Rules': [{'BucketKeyEnabled': encryption}]} encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name) - encryption_status['ServerSideEncryptionConfiguration']['Rules'][0]['BucketKeyEnabled'] = encryption + encryption_status["ServerSideEncryptionConfiguration"]["Rules"][0]["BucketKeyEnabled"] = encryption s3_client.put_bucket_encryption( - Bucket=bucket_name, - ServerSideEncryptionConfiguration=encryption_status[ - 'ServerSideEncryptionConfiguration'] + Bucket=bucket_name, ServerSideEncryptionConfiguration=encryption_status["ServerSideEncryptionConfiguration"] ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_tagging(s3_client, bucket_name): s3_client.delete_bucket_tagging(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_encryption(s3_client, bucket_name): s3_client.delete_bucket_encryption(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=["OperationAborted"]) def delete_bucket(s3_client, bucket_name): try: s3_client.delete_bucket(Bucket=bucket_name) - except is_boto3_error_code('NoSuchBucket'): + except is_boto3_error_code("NoSuchBucket"): # This means bucket should have been in a deleting state when we checked it existence # We just ignore the error pass -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_public_access(s3_client, bucket_name, public_acces): - ''' + """ Put new public access block to S3 bucket - ''' + """ s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_acces) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_public_access(s3_client, bucket_name): - ''' + """ Delete public access block from S3 bucket - ''' + """ s3_client.delete_public_access_block(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_ownership(s3_client, bucket_name): - ''' + """ Delete bucket ownership controls from S3 bucket - ''' + """ s3_client.delete_bucket_ownership_controls(Bucket=bucket_name) 
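Nearly every helper in this block carries AWSRetry.exponential_backoff with catch_extra_error_codes, so transient NoSuchBucket and OperationAborted responses, which are common in the seconds after a bucket is created, are retried rather than surfaced immediately. A rough, hypothetical stand-in for such a decorator, assuming nothing about the real implementation beyond retry-on-matching-code with growing delays:

    import functools
    import time

    from botocore.exceptions import ClientError

    def retry_on_codes(codes, tries=5, base_delay=1, max_delay=120):
        # Hypothetical stand-in for AWSRetry.exponential_backoff(...).
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                delay = base_delay
                for attempt in range(tries):
                    try:
                        return func(*args, **kwargs)
                    except ClientError as e:
                        code = e.response["Error"]["Code"]
                        if code not in codes or attempt == tries - 1:
                            raise
                        time.sleep(min(delay, max_delay))
                        delay *= 2
            return wrapper
        return decorator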
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_ownership(s3_client, bucket_name, target): - ''' + """ Put bucket ownership controls for S3 bucket - ''' + """ s3_client.put_bucket_ownership_controls( - Bucket=bucket_name, - OwnershipControls={ - 'Rules': [{'ObjectOwnership': target}] - }) + Bucket=bucket_name, OwnershipControls={"Rules": [{"ObjectOwnership": target}]} + ) def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True): @@ -926,8 +984,11 @@ def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, shou else: return current_policy if should_fail: - module.fail_json(msg="Bucket policy failed to apply in the expected time", - requested_policy=expected_policy, live_policy=current_policy) + module.fail_json( + msg="Bucket policy failed to apply in the expected time", + requested_policy=expected_policy, + live_policy=current_policy, + ) else: return None @@ -943,8 +1004,11 @@ def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should else: return requester_pays_status if should_fail: - module.fail_json(msg="Bucket request payment failed to apply in the expected time", - requested_status=expected_payer, live_status=requester_pays_status) + module.fail_json( + msg="Bucket request payment failed to apply in the expected time", + requested_status=expected_payer, + live_status=requester_pays_status, + ) else: return None @@ -961,8 +1025,11 @@ def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encrypti return encryption if should_fail: - module.fail_json(msg="Bucket encryption failed to apply in the expected time", - requested_encryption=expected_encryption, live_encryption=encryption) + module.fail_json( + msg="Bucket encryption failed to apply in the expected time", + requested_encryption=expected_encryption, + live_encryption=encryption, + ) return encryption @@ -979,8 +1046,11 @@ def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encrypti return encryption if should_fail: - module.fail_json(msg="Bucket Key failed to apply in the expected time", - requested_encryption=expected_encryption, live_encryption=encryption) + module.fail_json( + msg="Bucket Key failed to apply in the expected time", + requested_encryption=expected_encryption, + live_encryption=encryption, + ) return encryption @@ -990,12 +1060,15 @@ def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioni versioning_status = get_bucket_versioning(s3_client, bucket_name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get updated versioning for bucket") - if versioning_status.get('Status') != required_versioning: + if versioning_status.get("Status") != required_versioning: time.sleep(8) else: return versioning_status - module.fail_json(msg="Bucket versioning failed to apply in the expected time", - requested_versioning=required_versioning, live_versioning=versioning_status) + module.fail_json( + msg="Bucket versioning failed to apply in the expected time", + requested_versioning=required_versioning, + live_versioning=versioning_status, + ) def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict): @@ -1008,68 +1081,72 @@ def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict): time.sleep(5) else: 
return current_tags_dict - module.fail_json(msg="Bucket tags failed to apply in the expected time", - requested_tags=expected_tags_dict, live_tags=current_tags_dict) + module.fail_json( + msg="Bucket tags failed to apply in the expected time", + requested_tags=expected_tags_dict, + live_tags=current_tags_dict, + ) def get_current_bucket_tags_dict(s3_client, bucket_name): try: - current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet') - except is_boto3_error_code('NoSuchTagSet'): + current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get("TagSet") + except is_boto3_error_code("NoSuchTagSet"): return {} # The Ceph S3 API returns a different error code to AWS - except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchTagSetError"): # pylint: disable=duplicate-except return {} return boto3_tag_list_to_ansible_dict(current_tags) def get_bucket_public_access(s3_client, bucket_name): - ''' + """ Get current bucket public access block - ''' + """ try: bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name) - return bucket_public_access_block['PublicAccessBlockConfiguration'] - except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'): + return bucket_public_access_block["PublicAccessBlockConfiguration"] + except is_boto3_error_code("NoSuchPublicAccessBlockConfiguration"): return {} def get_bucket_ownership_cntrl(s3_client, bucket_name): - ''' + """ Get current bucket public access block - ''' + """ try: bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name) - return bucket_ownership['OwnershipControls']['Rules'][0]['ObjectOwnership'] - except is_boto3_error_code(['OwnershipControlsNotFoundError', 'NoSuchOwnershipControls']): + return bucket_ownership["OwnershipControls"]["Rules"][0]["ObjectOwnership"] + except is_boto3_error_code(["OwnershipControlsNotFoundError", "NoSuchOwnershipControls"]): return None def paginated_list(s3_client, **pagination_params): - pg = s3_client.get_paginator('list_objects_v2') + pg = s3_client.get_paginator("list_objects_v2") for page in pg.paginate(**pagination_params): - yield [data['Key'] for data in page.get('Contents', [])] + yield [data["Key"] for data in page.get("Contents", [])] def paginated_versions_list(s3_client, **pagination_params): try: - pg = s3_client.get_paginator('list_object_versions') + pg = s3_client.get_paginator("list_object_versions") for page in pg.paginate(**pagination_params): # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion - yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))] - except is_boto3_error_code('NoSuchBucket'): + yield [ + (data["Key"], data["VersionId"]) for data in (page.get("Versions", []) + page.get("DeleteMarkers", [])) + ] + except is_boto3_error_code("NoSuchBucket"): yield [] def destroy_bucket(s3_client, module): - force = module.params.get("force") name = module.params.get("name") try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") @@ -1080,168 +1157,120 @@ def destroy_bucket(s3_client, module): # 
if there are contents then we need to delete them (including versions) before we can delete the bucket try: for key_version_pairs in paginated_versions_list(s3_client, Bucket=name): - formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs] + formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs] for fk in formatted_keys: # remove VersionId from cases where they are `None` so that # unversioned objects are deleted using `DeleteObject` # rather than `DeleteObjectVersion`, improving backwards # compatibility with older IAM policies. - if not fk.get('VersionId'): - fk.pop('VersionId') + if not fk.get("VersionId") or fk.get("VersionId") == "null": + fk.pop("VersionId") if formatted_keys: - resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys}) - if resp.get('Errors'): + resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys}) + if resp.get("Errors"): + objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]]) module.fail_json( - msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format( - ', '.join([k['Key'] for k in resp['Errors']]) + msg=( + f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}" ), - errors=resp['Errors'], response=resp + errors=resp["Errors"], + response=resp, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed while deleting bucket") try: delete_bucket(s3_client, name) - s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) + s3_client.get_waiter("bucket_not_exists").wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.') + module.fail_json_aws(e, msg="An error occurred waiting for the bucket to be deleted.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete bucket") module.exit_json(changed=True) -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - - -def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - return boto3_conn(**params) - - def main(): - argument_spec = dict( - force=dict(default=False, type='bool'), - policy=dict(type='json'), + force=dict(default=False, type="bool"), + policy=dict(type="json"), name=dict(required=True), - requester_pays=dict(type='bool'), - 
state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - versioning=dict(type='bool'), - ceph=dict(default=False, type='bool', aliases=['rgw']), - encryption=dict(choices=['none', 'AES256', 'aws:kms']), + requester_pays=dict(type="bool"), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + versioning=dict(type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), + encryption=dict(choices=["none", "AES256", "aws:kms"]), encryption_key_id=dict(), - bucket_key_enabled=dict(type='bool'), - public_access=dict(type='dict', options=dict( - block_public_acls=dict(type='bool', default=False), - ignore_public_acls=dict(type='bool', default=False), - block_public_policy=dict(type='bool', default=False), - restrict_public_buckets=dict(type='bool', default=False))), - delete_public_access=dict(type='bool', default=False), - object_ownership=dict(type='str', choices=['BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter']), - delete_object_ownership=dict(type='bool', default=False), - acl=dict(type='str', choices=['private', 'public-read', 'public-read-write', 'authenticated-read']), - validate_bucket_name=dict(type='bool', default=True), + bucket_key_enabled=dict(type="bool"), + public_access=dict( + type="dict", + options=dict( + block_public_acls=dict(type="bool", default=False), + ignore_public_acls=dict(type="bool", default=False), + block_public_policy=dict(type="bool", default=False), + restrict_public_buckets=dict(type="bool", default=False), + ), + ), + delete_public_access=dict(type="bool", default=False), + object_ownership=dict(type="str", choices=["BucketOwnerEnforced", "BucketOwnerPreferred", "ObjectWriter"]), + delete_object_ownership=dict(type="bool", default=False), + acl=dict(type="str", choices=["private", "public-read", "public-read-write", "authenticated-read"]), + validate_bucket_name=dict(type="bool", default=True), + dualstack=dict(default=False, type="bool"), object_lock_enabled=dict(type="bool"), ) required_by = dict( - encryption_key_id=('encryption',), + encryption_key_id=("encryption",), ) mutually_exclusive = [ - ['public_access', 'delete_public_access'], - ['delete_object_ownership', 'object_ownership'] + ["public_access", "delete_public_access"], + ["delete_object_ownership", "object_ownership"], + ["dualstack", "endpoint_url"], ] required_if = [ - ['ceph', True, ['endpoint_url']], + ["ceph", True, ["endpoint_url"]], ] module = AnsibleAWSModule( argument_spec=argument_spec, required_by=required_by, required_if=required_if, - mutually_exclusive=mutually_exclusive + mutually_exclusive=mutually_exclusive, ) - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - - if module.params.get('validate_bucket_name'): - validate_bucket_name(module, module.params["name"]) - - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - endpoint_url = module.params.get('endpoint_url') - ceph = module.params.get('ceph') - - # Look at endpoint_url and tweak connection settings - # allow eucarc environment variables to be used if ansible vars aren't set - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = 
os.environ['S3_URL'] - module.deprecate( - "Support for the 'S3_URL' environment variable has been " - "deprecated. We recommend using the 'endpoint_url' module " - "parameter. Alternatively, the 'AWS_URL' environment variable can" - "be used instead.", - date='2024-12-01', collection_name='amazon.aws', + # Parameter validation + encryption = module.params.get("encryption") + encryption_key_id = module.params.get("encryption_key_id") + if encryption_key_id is not None and encryption != "aws:kms": + module.fail_json( + msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id." ) - # if connecting to Ceph RGW, Walrus or fakes3 - if endpoint_url: - for key in ['validate_certs', 'security_token', 'profile_name']: - aws_connect_kwargs.pop(key, None) - s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url) + extra_params = s3_extra_params(module.params) + retry_decorator = AWSRetry.jittered_backoff( + max_delay=120, + catch_extra_error_codes=["NoSuchBucket", "OperationAborted"], + ) + s3_client = module.client("s3", retry_decorator=retry_decorator, **extra_params) - if s3_client is None: # this should never happen - module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.') + if module.params.get("validate_bucket_name"): + err = validate_bucket_name(module.params["name"]) + if err: + module.fail_json(msg=err) state = module.params.get("state") - encryption = module.params.get("encryption") - encryption_key_id = module.params.get("encryption_key_id") - - # Parameter validation - if encryption_key_id is not None and encryption != 'aws:kms': - module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.") - if state == 'present': - create_or_update_bucket(s3_client, module, location) - elif state == 'absent': + if state == "present": + create_or_update_bucket(s3_client, module) + elif state == "absent": destroy_bucket(s3_client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py new file mode 100644 index 000000000..b382e5eeb --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py @@ -0,0 +1,642 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: s3_bucket_info +version_added: 1.0.0 +version_added_collection: community.aws +author: + - "Gerben Geijteman (@hyperized)" +short_description: Lists S3 buckets in AWS +description: + - Lists S3 buckets and details about those buckets. + - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info). The usage did not change. +options: + name: + description: + - Name of bucket to query. + type: str + default: "" + version_added: 1.4.0 + name_filter: + description: + - Limits buckets to only buckets whose name contains the string in I(name_filter). + type: str + default: "" + version_added: 1.4.0 + bucket_facts: + description: + - Retrieve requested S3 bucket detailed information. + - Each bucket_X option executes one API call, hence many options being set to C(true) will cause slower module execution. + - You can limit buckets by using the I(name) or I(name_filter) option.
+ suboptions: + bucket_accelerate_configuration: + description: Retrieve S3 accelerate configuration. + type: bool + default: False + bucket_location: + description: Retrieve S3 bucket location. + type: bool + default: False + bucket_replication: + description: Retrieve S3 bucket replication. + type: bool + default: False + bucket_acl: + description: Retrieve S3 bucket ACLs. + type: bool + default: False + bucket_logging: + description: Retrieve S3 bucket logging. + type: bool + default: False + bucket_request_payment: + description: Retrieve S3 bucket request payment. + type: bool + default: False + bucket_tagging: + description: Retrieve S3 bucket tagging. + type: bool + default: False + bucket_cors: + description: Retrieve S3 bucket CORS configuration. + type: bool + default: False + bucket_notification_configuration: + description: Retrieve S3 bucket notification configuration. + type: bool + default: False + bucket_encryption: + description: Retrieve S3 bucket encryption. + type: bool + default: False + bucket_ownership_controls: + description: + - Retrieve S3 ownership controls. + type: bool + default: False + bucket_website: + description: Retrieve S3 bucket website. + type: bool + default: False + bucket_policy: + description: Retrieve S3 bucket policy. + type: bool + default: False + bucket_policy_status: + description: Retrieve S3 bucket policy status. + type: bool + default: False + bucket_lifecycle_configuration: + description: Retrieve S3 bucket lifecycle configuration. + type: bool + default: False + public_access_block: + description: Retrieve S3 bucket public access block. + type: bool + default: False + bucket_versioning: + description: + - Retrieve the versioning state of a bucket. + - To retrieve the versioning state of a bucket, you must be the bucket owner. + type: bool + default: False + version_added: 7.3.0 + type: dict + version_added: 1.4.0 + transform_location: + description: + - S3 bucket location for default us-east-1 is normally reported as C(null). + - Setting this option to C(true) will return C(us-east-1) instead. + - Affects only queries with I(bucket_facts=true) and I(bucket_location=true). + type: bool + default: False + version_added: 1.4.0 +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Note: Only AWS S3 is currently supported + +# Lists all S3 buckets +- amazon.aws.s3_bucket_info: + register: result + +# Retrieve detailed bucket information +- amazon.aws.s3_bucket_info: + # Show only buckets with name matching + name_filter: your.testing + # Choose facts to retrieve + bucket_facts: + # bucket_accelerate_configuration: true + bucket_acl: true + bucket_cors: true + bucket_encryption: true + # bucket_lifecycle_configuration: true + bucket_location: true + # bucket_logging: true + # bucket_notification_configuration: true + # bucket_ownership_controls: true + # bucket_policy: true + # bucket_policy_status: true + # bucket_replication: true + # bucket_request_payment: true + # bucket_tagging: true + # bucket_website: true + # public_access_block: true + transform_location: true + register: result + +# Print out result +- name: List buckets + ansible.builtin.debug: + msg: "{{ result['buckets'] }}" +""" + +RETURN = r""" +bucket_list: + description: "List of buckets" + returned: always + type: complex + contains: + name: + description: Bucket name. 
+ returned: always + type: str + sample: a-testing-bucket-name + creation_date: + description: Bucket creation date timestamp. + returned: always + type: str + sample: "2021-01-21T12:44:10+00:00" + public_access_block: + description: Bucket public access block configuration. + returned: when I(bucket_facts=true) and I(public_access_block=true) + type: complex + contains: + PublicAccessBlockConfiguration: + description: PublicAccessBlockConfiguration data. + returned: when PublicAccessBlockConfiguration is defined for the bucket + type: complex + contains: + BlockPublicAcls: + description: BlockPublicAcls setting value. + type: bool + sample: true + BlockPublicPolicy: + description: BlockPublicPolicy setting value. + type: bool + sample: true + IgnorePublicAcls: + description: IgnorePublicAcls setting value. + type: bool + sample: true + RestrictPublicBuckets: + description: RestrictPublicBuckets setting value. + type: bool + sample: true + bucket_name_filter: + description: String used to limit buckets. See I(name_filter). + returned: when I(name_filter) is defined + type: str + sample: filter-by-this-string + bucket_acl: + description: Bucket ACL configuration. + returned: when I(bucket_facts=true) and I(bucket_acl=true) + type: complex + contains: + Grants: + description: List of ACL grants. + type: list + sample: [] + Owner: + description: Bucket owner information. + type: complex + contains: + DisplayName: + description: Bucket owner user display name. + returned: always + type: str + sample: username + ID: + description: Bucket owner user ID. + returned: always + type: str + sample: 123894e509349etc + bucket_cors: + description: Bucket CORS configuration. + returned: when I(bucket_facts=true) and I(bucket_cors=true) + type: complex + contains: + CORSRules: + description: Bucket CORS configuration. + returned: when CORS rules are defined for the bucket + type: list + sample: [] + bucket_encryption: + description: Bucket encryption configuration. + returned: when I(bucket_facts=true) and I(bucket_encryption=true) + type: complex + contains: + ServerSideEncryptionConfiguration: + description: ServerSideEncryptionConfiguration configuration. + returned: when encryption is enabled on the bucket + type: complex + contains: + Rules: + description: List of applied encryption rules. + returned: when encryption is enabled on the bucket + type: list + sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False } + bucket_lifecycle_configuration: + description: Bucket lifecycle configuration settings. + returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true) + type: complex + contains: + Rules: + description: List of lifecycle management rules. + returned: when lifecycle configuration is present + type: list + sample: [{ "Status": "Enabled", "ID": "example-rule" }] + bucket_location: + description: Bucket location. + returned: when I(bucket_facts=true) and I(bucket_location=true) + type: complex + contains: + LocationConstraint: + description: AWS region. + returned: always + type: str + sample: us-east-2 + bucket_logging: + description: Server access logging configuration. + returned: when I(bucket_facts=true) and I(bucket_logging=true) + type: complex + contains: + LoggingEnabled: + description: Server access logging configuration. + returned: when server access logging is defined for the bucket + type: complex + contains: + TargetBucket: + description: Target bucket name.
+ returned: always
+ type: str
+ sample: logging-bucket-name
+ TargetPrefix:
+ description: Prefix in target bucket.
+ returned: always
+ type: str
+ sample: ""
+ bucket_notification_configuration:
+ description: Bucket notification settings.
+ returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true)
+ type: complex
+ contains:
+ TopicConfigurations:
+ description: List of notification event configurations.
+ returned: when at least one notification is configured
+ type: list
+ sample: []
+ bucket_ownership_controls:
+ description: Preferred object ownership settings.
+ returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true)
+ type: complex
+ contains:
+ OwnershipControls:
+ description: Object ownership settings.
+ returned: when ownership controls are defined for the bucket
+ type: complex
+ contains:
+ Rules:
+ description: List of ownership rules.
+ returned: when ownership rule is defined
+ type: list
+ sample: [{ "ObjectOwnership": "ObjectWriter" }]
+ bucket_policy:
+ description: Bucket policy contents.
+ returned: when I(bucket_facts=true) and I(bucket_policy=true)
+ type: str
+ sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}]}'
+ bucket_policy_status:
+ description: Status of bucket policy.
+ returned: when I(bucket_facts=true) and I(bucket_policy_status=true)
+ type: complex
+ contains:
+ PolicyStatus:
+ description: Status of bucket policy.
+ returned: when bucket policy is present
+ type: complex
+ contains:
+ IsPublic:
+ description: Report bucket policy public status.
+ returned: when bucket policy is present
+ type: bool
+ sample: true
+ bucket_replication:
+ description: Replication configuration settings.
+ returned: when I(bucket_facts=true) and I(bucket_replication=true)
+ type: complex
+ contains:
+ Role:
+ description: IAM role used for replication.
+ returned: when replication rule is defined
+ type: str
+ sample: "arn:aws:iam::123:role/example-role"
+ Rules:
+ description: List of replication rules.
+ returned: when replication rule is defined
+ type: list
+ sample: [{ "ID": "rule-1", "Filter": "{}" }]
+ bucket_request_payment:
+ description: Requester pays setting.
+ returned: when I(bucket_facts=true) and I(bucket_request_payment=true)
+ type: complex
+ contains:
+ Payer:
+ description: Current payer.
+ returned: always
+ type: str
+ sample: BucketOwner
+ bucket_tagging:
+ description: Bucket tags.
+ returned: when I(bucket_facts=true) and I(bucket_tagging=true)
+ type: dict
+ sample: { "Tag1": "Value1", "Tag2": "Value2" }
+ bucket_website:
+ description: Static website hosting.
+ returned: when I(bucket_facts=true) and I(bucket_website=true)
+ type: complex
+ contains:
+ ErrorDocument:
+ description: Object serving as HTTP error page.
+ returned: when static website hosting is enabled
+ type: dict
+ sample: { "Key": "error.html" }
+ IndexDocument:
+ description: Object serving as HTTP index page.
+ returned: when static website hosting is enabled
+ type: dict
+ sample: { "Suffix": "index.html" }
+ RedirectAllRequestsTo:
+ description: Website redirect settings.
+ returned: when redirecting all requests is configured
+ type: complex
+ contains:
+ HostName:
+ description: Hostname to redirect.
+ returned: always
+ type: str
+ sample: www.example.com
+ Protocol:
+ description: Protocol used for redirect.
+ returned: always
+ type: str
+ sample: https
+ bucket_versioning:
+ description:
+ - The versioning state of the bucket. 
+ - This will also specify whether MFA delete is enabled in the bucket versioning configuration,
+ but only if the bucket has been configured with MFA delete.
+ returned: when I(bucket_facts=true) and I(bucket_versioning=true)
+ type: dict
+ sample: { 'Status': 'Enabled' }
+ version_added: 7.2.0
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def get_bucket_list(module, connection, name="", name_filter=""):
+ """
+ Return the result of list_buckets, JSON encoded.
+ Filter only buckets matching 'name' or 'name_filter', if defined.
+ :param module: the AnsibleAWSModule instance
+ :param connection: the boto3 S3 client
+ :param name: bucket name to match exactly
+ :param name_filter: substring to match against bucket names
+ :return: list of matching buckets
+ """
+ buckets = []
+ filtered_buckets = []
+ final_buckets = []
+
+ # Get all buckets
+ try:
+ buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+ module.fail_json_aws(err_code, msg="Failed to list buckets")
+
+ # Filter buckets if requested
+ if name_filter:
+ for bucket in buckets:
+ if name_filter in bucket["name"]:
+ filtered_buckets.append(bucket)
+ elif name:
+ for bucket in buckets:
+ if name == bucket["name"]:
+ filtered_buckets.append(bucket)
+
+ # Return proper list (filtered or all)
+ if name or name_filter:
+ final_buckets = filtered_buckets
+ else:
+ final_buckets = buckets
+ return final_buckets
+
+
+def get_buckets_facts(connection, buckets, requested_facts, transform_location):
+ """
+ Retrieve additional information about S3 buckets
+ """
+ full_bucket_list = []
+ # Iterate over all buckets and append the retrieved facts to each bucket
+ for bucket in buckets:
+ bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location))
+ full_bucket_list.append(bucket)
+
+ return full_bucket_list
+
+
+def get_bucket_details(connection, name, requested_facts, transform_location):
+ """
+ Execute all enabled S3 API get calls for the selected bucket
+ """
+ all_facts = {}
+
+ for key in requested_facts:
+ if requested_facts[key]:
+ if key == "bucket_location":
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_location(name, connection, transform_location)
+ # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+ elif key == "bucket_tagging":
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_tagging(name, connection)
+ # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+ else:
+ all_facts[key] = {}
+ try:
+ all_facts[key] = get_bucket_property(name, connection, key)
+ # we just pass on error - an error means the resource is undefined
+ except botocore.exceptions.ClientError:
+ pass
+
+ return all_facts
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_location(name, connection, transform_location=False):
+ """
+ Get bucket location and optionally transform 'null' to 'us-east-1'
+ """
+ data = connection.get_bucket_location(Bucket=name)
+
+ # Replace 'null' with 'us-east-1'? 
+ if transform_location:
+ try:
+ if not data["LocationConstraint"]:
+ data["LocationConstraint"] = "us-east-1"
+ except KeyError:
+ pass
+ # Strip response metadata (not needed)
+ data.pop("ResponseMetadata", None)
+ return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_tagging(name, connection):
+ """
+ Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function
+ """
+ data = connection.get_bucket_tagging(Bucket=name)
+
+ try:
+ bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"])
+ return bucket_tags
+ except KeyError:
+ # Strip response metadata (not needed)
+ data.pop("ResponseMetadata", None)
+ return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_property(name, connection, get_api_name):
+ """
+ Get bucket property
+ """
+ api_call = "get_" + get_api_name
+ api_function = getattr(connection, api_call)
+ data = api_function(Bucket=name)
+
+ # Strip response metadata (not needed)
+ data.pop("ResponseMetadata", None)
+ return data
+
+
+def main():
+ """
+ Get list of S3 buckets
+ :return:
+ """
+ argument_spec = dict(
+ name=dict(type="str", default=""),
+ name_filter=dict(type="str", default=""),
+ bucket_facts=dict(
+ type="dict",
+ options=dict(
+ bucket_accelerate_configuration=dict(type="bool", default=False),
+ bucket_acl=dict(type="bool", default=False),
+ bucket_cors=dict(type="bool", default=False),
+ bucket_encryption=dict(type="bool", default=False),
+ bucket_lifecycle_configuration=dict(type="bool", default=False),
+ bucket_location=dict(type="bool", default=False),
+ bucket_logging=dict(type="bool", default=False),
+ bucket_notification_configuration=dict(type="bool", default=False),
+ bucket_ownership_controls=dict(type="bool", default=False),
+ bucket_policy=dict(type="bool", default=False),
+ bucket_policy_status=dict(type="bool", default=False),
+ bucket_replication=dict(type="bool", default=False),
+ bucket_request_payment=dict(type="bool", default=False),
+ bucket_tagging=dict(type="bool", default=False),
+ bucket_website=dict(type="bool", default=False),
+ public_access_block=dict(type="bool", default=False),
+ bucket_versioning=dict(type="bool", default=False),
+ ),
+ ),
+ transform_location=dict(type="bool", default=False),
+ )
+
+ # Ensure we have an empty dict
+ result = {}
+
+ # Define mutually exclusive options
+ mutually_exclusive = [
+ ["name", "name_filter"],
+ ]
+
+ # Instantiate the module (AnsibleAWSModule also merges in the common AWS argument spec)
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ )
+
+ # Get parameters
+ name = module.params.get("name")
+ name_filter = module.params.get("name_filter")
+ requested_facts = module.params.get("bucket_facts")
+ transform_location = module.params.get("transform_location")
+
+ # Set up connection
+ try:
+ connection = module.client("s3")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+ module.fail_json_aws(err_code, msg="Failed to connect to AWS")
+
+ # Get basic bucket list (name + creation date)
+ bucket_list = get_bucket_list(module, connection, name, name_filter)
+
+ # Add information about name/name_filter to result
+ if name:
+ result["bucket_name"] = name
+ elif name_filter:
+ result["bucket_name_filter"] = name_filter
+
+ # Gather detailed information about buckets if requested
+ bucket_facts = 
module.params.get("bucket_facts") + if bucket_facts: + result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location) + else: + result["buckets"] = bucket_list + + module.exit_json(msg="Retrieved s3 info.", **result) + + +# MAIN +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py index 50beab9d2..2c4ebe9c3 100644 --- a/ansible_collections/amazon/aws/plugins/modules/s3_object.py +++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: s3_object version_added: 1.0.0 @@ -15,8 +13,6 @@ description: - This module allows the user to manage the objects and directories within S3 buckets. Includes support for creating and deleting objects and directories, retrieving objects as files or strings, generating download links and copying objects that are already stored in Amazon S3. - - Support for creating or deleting S3 buckets with this module has been deprecated and will be - removed in release 6.0.0. - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module. - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID. - When using non-AWS services, I(endpoint_url) should be specified. @@ -84,19 +80,22 @@ options: - 'C(getstr): download object as string' - 'C(list): list keys' - 'C(create): create bucket directories' - - 'C(delete): delete bucket directories' - 'C(delobj): delete object' - 'C(copy): copy object that is already stored in another bucket' - - Support for creating and deleting buckets has been deprecated and will - be removed in release 6.0.0. To create and manage the bucket itself - please use the M(amazon.aws.s3_bucket) module. + - Support for creating and deleting buckets was removed in release 6.0.0. + To create and manage the bucket itself please use the M(amazon.aws.s3_bucket) module. required: true - choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'] + choices: ['get', 'put', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'] type: str object: description: - - Keyname of the object inside the bucket. + - Key name of the object inside the bucket. - Can be used to create "virtual directories", see examples. + - Object key names should not include the leading C(/), see + U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html) for more + information. + - Support for passing the leading C(/) has been deprecated and will be removed + in a release after 2025-12-01. type: str sig_v4: description: @@ -116,6 +115,14 @@ options: - For a full list of permissions see the AWS documentation U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). 
default: ['private'] + choices: + - "private" + - "public-read" + - "public-read-write" + - "aws-exec-read" + - "authenticated-read" + - "bucket-owner-read" + - "bucket-owner-full-control" type: list elements: str prefix: @@ -154,6 +161,9 @@ options: dualstack: description: - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Support for passing I(dualstack) and I(endpoint_url) at the same time has been deprecated, + the dualstack endpoints are automatically configured using the configured I(region). + Support will be removed in a release after 2024-12-01. type: bool default: false ceph: @@ -218,11 +228,19 @@ options: type: str description: - key name of the source object. - required: true + - if not specified, all the objects of the I(copy_src.bucket) will be copied into the specified bucket. + required: false version_id: type: str description: - version ID of the source object. + prefix: + description: + - Copy all the keys that begin with the specified prefix. + - Ignored if I(copy_src.object) is supplied. + default: "" + type: str + version_added: 6.2.0 validate_bucket_name: description: - Whether the bucket name should be validated to conform to AWS S3 naming rules. @@ -244,14 +262,15 @@ notes: - Support for the C(S3_URL) environment variable has been deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable. + - Support for creating and deleting buckets was removed in release 6.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Simple PUT operation amazon.aws.s3_object: bucket: mybucket @@ -319,24 +338,6 @@ EXAMPLES = ''' marker: /my/desired/0023.txt max_keys: 472 -- name: Create an empty bucket - amazon.aws.s3_object: - bucket: mybucket - mode: create - permission: public-read - -- name: Create a bucket with key as directory, in the EU region - amazon.aws.s3_object: - bucket: mybucket - object: /my/directory/path - mode: create - region: eu-west-1 - -- name: Delete a bucket and all contents - amazon.aws.s3_object: - bucket: mybucket - mode: delete - - name: GET an object but don't download if the file checksums match. New in 2.0 amazon.aws.s3_object: bucket: mybucket @@ -357,11 +358,19 @@ EXAMPLES = ''' object: /my/desired/key.txt mode: copy copy_src: - bucket: srcbucket - object: /source/key.txt -''' + bucket: srcbucket + object: /source/key.txt -RETURN = ''' +- name: Copy all the objects with name starting with 'ansible_' + amazon.aws.s3_object: + bucket: mybucket + mode: copy + copy_src: + bucket: srcbucket + prefix: 'ansible_' +""" + +RETURN = r""" msg: description: Message indicating the status of the operation. returned: always @@ -391,57 +400,72 @@ s3_keys: - prefix1/ - prefix1/key1 - prefix1/key2 -''' +""" +import base64 +import copy +import io import mimetypes import os -import io -from ssl import SSLError -import base64 import time +from ssl import SSLError try: + # Beware, S3 is a "special" case, it sometimes catches botocore exceptions and + # re-raises them as boto3 exceptions. 
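The comment above is the reason the rewritten handlers in this file catch boto3.exceptions.Boto3Error alongside the botocore exception classes. A minimal standalone sketch of that defensive pattern, assuming only that boto3 is installed (the helper name and the RuntimeError wrapper are illustrative, not part of the module):

    import boto3
    import botocore.exceptions

    def head_object_safely(client, bucket, key):
        # Normalize API-level, client-side, and boto3-wrapped failures into one error.
        try:
            return client.head_object(Bucket=bucket, Key=key)
        except (
            botocore.exceptions.ClientError,    # API errors such as 403/404
            botocore.exceptions.BotoCoreError,  # client-side errors (config, endpoint, ...)
            boto3.exceptions.Boto3Error,        # errors re-raised by boto3's higher-level helpers
        ) as e:
            raise RuntimeError(f"S3 call failed for {bucket}/{key}") from e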
+ import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import to_text from ansible.module_utils.basic import to_native -from ansible.module_utils.six.moves.urllib.parse import urlparse - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.s3 import HAS_MD5 from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag_content +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] +IGNORE_S3_DROP_IN_EXCEPTIONS = ["XNotImplemented", "NotImplemented"] class Sigv4Required(Exception): pass +class S3ObjectFailure(Exception): + def __init__(self, message=None, original_e=None): + super().__init__(message) + self.original_e = original_e + self.message = message + + def key_check(module, s3, bucket, obj, version=None, validate=True): try: if version: - s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - s3.head_object(Bucket=bucket, Key=obj) - except is_boto3_error_code('404'): + s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) + except is_boto3_error_code("404"): return False - except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("403") as e: # pylint: disable=duplicate-except if validate is True: - module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." 
% obj) + module.fail_json_aws( + e, + msg=f"Failed while looking up object (during key check) {obj}.", + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while looking up object (during key check) {obj}.", e) return True @@ -452,181 +476,175 @@ def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) else: local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version) - return s3_etag == local_etag def get_etag(s3, bucket, obj, version=None): try: if version: - key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - key_check = s3.head_object(Bucket=bucket, Key=obj) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) if not key_check: return None - return key_check['ETag'] - except is_boto3_error_code('404'): + return key_check["ETag"] + except is_boto3_error_code("404"): return None def get_s3_last_modified_timestamp(s3, bucket, obj, version=None): if version: - key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - key_check = s3.head_object(Bucket=bucket, Key=obj) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) if not key_check: return None - return key_check['LastModified'].timestamp() + return key_check["LastModified"].timestamp() -def is_local_object_latest(module, s3, bucket, obj, version=None, local_file=None): +def is_local_object_latest(s3, bucket, obj, version=None, local_file=None): s3_last_modified = get_s3_last_modified_timestamp(s3, bucket, obj, version) - if os.path.exists(local_file) is False: + if not os.path.exists(local_file): return False - else: - local_last_modified = os.path.getmtime(local_file) - + local_last_modified = os.path.getmtime(local_file) return s3_last_modified <= local_last_modified def bucket_check(module, s3, bucket, validate=True): - exists = True try: - s3.head_bucket(Bucket=bucket) - except is_boto3_error_code('404'): - return False - except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except - if validate is True: - module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) - except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Invalid endpoint provided") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) - return exists - - -def create_bucket(module, s3, bucket, location=None): - module.deprecate('Support for creating S3 buckets using the s3_object module' - ' has been deprecated. 
Please use the ``s3_bucket`` module' - ' instead.', version='6.0.0', collection_name='amazon.aws') - if module.check_mode: - module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) - configuration = {} - if location not in ('us-east-1', None): - configuration['LocationConstraint'] = location - try: - if len(configuration) > 0: - s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) - else: - s3.create_bucket(Bucket=bucket) - if module.params.get('permission'): - # Wait for the bucket to exist before setting ACLs - s3.get_waiter('bucket_exists').wait(Bucket=bucket) - for acl in module.params.get('permission'): - AWSRetry.jittered_backoff( - max_delay=120, catch_extra_error_codes=['NoSuchBucket'] - )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") - - if bucket: - return True + s3.head_bucket(aws_retry=True, Bucket=bucket) + except is_boto3_error_code("404") as e: + if validate: + raise S3ObjectFailure( + ( + f"Bucket '{bucket}' not found (during bucket_check). " + "Support for automatically creating buckets was removed in release 6.0.0. " + "The amazon.aws.s3_bucket module can be used to create buckets." + ), + e, + ) + except is_boto3_error_code("403") as e: # pylint: disable=duplicate-except + if validate: + raise S3ObjectFailure( + f"Permission denied accessing bucket '{bucket}' (during bucket_check).", + e, + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while looking up bucket '{bucket}' (during bucket_check).", + e, + ) +@AWSRetry.jittered_backoff() def paginated_list(s3, **pagination_params): - pg = s3.get_paginator('list_objects_v2') + pg = s3.get_paginator("list_objects_v2") for page in pg.paginate(**pagination_params): - yield [data['Key'] for data in page.get('Contents', [])] + for data in page.get("Contents", []): + yield data["Key"] def paginated_versioned_list_with_fallback(s3, **pagination_params): try: - versioned_pg = s3.get_paginator('list_object_versions') + versioned_pg = s3.get_paginator("list_object_versions") for page in versioned_pg.paginate(**pagination_params): - delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] - current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + delete_markers = [ + {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("DeleteMarkers", []) + ] + current_objects = [ + {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("Versions", []) + ] yield delete_markers + current_objects - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']): - for page in paginated_list(s3, **pagination_params): - yield [{'Key': data['Key']} for data in page] + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ["AccessDenied"]): + for key in paginated_list(s3, **pagination_params): + yield [{"Key": key}] -def list_keys(module, s3, 
bucket, prefix, marker, max_keys): - pagination_params = {'Bucket': bucket} - for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): - pagination_params[param_name] = param_value +def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None): + pagination_params = { + "Bucket": bucket, + "Prefix": prefix, + "StartAfter": marker, + "MaxKeys": max_keys, + } + pagination_params = {k: v for k, v in pagination_params.items() if v} + try: - keys = sum(paginated_list(s3, **pagination_params), []) - module.exit_json(msg="LIST operation complete", s3_keys=keys) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + return list(paginated_list(s3, **pagination_params)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure(f"Failed while listing the keys in the bucket {bucket}", e) -def delete_bucket(module, s3, bucket): - module.deprecate('Support for deleting S3 buckets using the s3_object module' - ' has been deprecated. Please use the ``s3_bucket`` module' - ' instead.', version='6.0.0', collection_name='amazon.aws') +def delete_key(module, s3, bucket, obj): if module.check_mode: - module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + module.exit_json( + msg="DELETE operation skipped - running in check mode", + changed=True, + ) try: - exists = bucket_check(module, s3, bucket) - if exists is False: - return False - # if there are contents then we need to delete them before we can delete the bucket - for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): - if keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) - s3.delete_bucket(Bucket=bucket) - return True - except is_boto3_error_code('NoSuchBucket'): - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket) + s3.delete_object(aws_retry=True, Bucket=bucket, Key=obj) + module.exit_json(msg=f"Object deleted from bucket {bucket}.", changed=True) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure(f"Failed while trying to delete {obj}.", e) -def delete_key(module, s3, bucket, obj): - if module.check_mode: - module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) +def put_object_acl(module, s3, bucket, obj, params=None): try: - s3.delete_object(Bucket=bucket, Key=obj) - module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj) + if params: + s3.put_object(aws_retry=True, **params) + for acl in module.params.get("permission"): + s3.put_object_acl(aws_retry=True, ACL=acl, Bucket=bucket, Key=obj) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn( + "PutObjectAcl is not implemented by your storage provider. 
Set the permissions parameters to the empty list" + " to avoid this warning" + ) + except is_boto3_error_code("AccessControlListNotSupported"): # pylint: disable=duplicate-except + module.warn("PutObjectAcl operation : The bucket does not allow ACLs.") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while creating object {obj}.", e) def create_dirkey(module, s3, bucket, obj, encrypt, expiry): if module.check_mode: module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) - try: - params = {'Bucket': bucket, 'Key': obj, 'Body': b''} - if encrypt: - params['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - - s3.put_object(**params) - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + params = {"Bucket": bucket, "Key": obj, "Body": b""} + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + ) + ) + put_object_acl(module, s3, bucket, obj, params) # Tags tags, _changed = ensure_tags(s3, module, bucket, obj) - try: - url = s3.generate_presigned_url(ClientMethod='put_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to generate presigned URL") - - url = put_download_url(module, s3, bucket, obj, expiry) + url = put_download_url(s3, bucket, obj, expiry) - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), url=url, tags=tags, changed=True) + module.exit_json( + msg=f"Virtual directory {obj} created in bucket {bucket}", + url=url, + tags=tags, + changed=True, + ) def path_check(path): @@ -636,77 +654,120 @@ def path_check(path): return False -def option_in_extra_args(option): - temp_option = option.replace('-', '').lower() +def guess_content_type(src): + if src: + content_type = mimetypes.guess_type(src)[0] + if content_type: + return content_type + + # S3 default content type + return "binary/octet-stream" + + +def get_extra_params( + encrypt=None, + encryption_mode=None, + encryption_kms_key_id=None, + metadata=None, +): + extra = {} + if encrypt: + extra["ServerSideEncryption"] = encryption_mode + if encryption_kms_key_id and encryption_mode == "aws:kms": + extra["SSEKMSKeyId"] = encryption_kms_key_id + if metadata: + extra["Metadata"] = {} + # determine object metadata and extra arguments + for option in metadata: + extra_args_option = option_in_extra_args(option) + if extra_args_option: + extra[extra_args_option] = metadata[option] + else: + extra["Metadata"][option] = metadata[option] + return extra - allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition', - 'contentencoding': 'ContentEncoding', 'contentlanguage': 
'ContentLanguage', - 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl', - 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP', - 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption', - 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey', - 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'} + +def option_in_extra_args(option): + temp_option = option.replace("-", "").lower() + + allowed_extra_args = { + "acl": "ACL", + "cachecontrol": "CacheControl", + "contentdisposition": "ContentDisposition", + "contentencoding": "ContentEncoding", + "contentlanguage": "ContentLanguage", + "contenttype": "ContentType", + "expires": "Expires", + "grantfullcontrol": "GrantFullControl", + "grantread": "GrantRead", + "grantreadacp": "GrantReadACP", + "grantwriteacp": "GrantWriteACP", + "metadata": "Metadata", + "requestpayer": "RequestPayer", + "serversideencryption": "ServerSideEncryption", + "storageclass": "StorageClass", + "ssecustomeralgorithm": "SSECustomerAlgorithm", + "ssecustomerkey": "SSECustomerKey", + "ssecustomerkeymd5": "SSECustomerKeyMD5", + "ssekmskeyid": "SSEKMSKeyId", + "websiteredirectlocation": "WebsiteRedirectLocation", + } if temp_option in allowed_extra_args: return allowed_extra_args[temp_option] -def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None, acl_disabled=False): +def upload_s3file( + module, + s3, + bucket, + obj, + expiry, + metadata, + encrypt, + headers, + src=None, + content=None, + acl_disabled=False, +): if module.check_mode: module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) try: - extra = {} - if encrypt: - extra['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - if metadata: - extra['Metadata'] = {} - - # determine object metadata and extra arguments - for option in metadata: - extra_args_option = option_in_extra_args(option) - if extra_args_option is not None: - extra[extra_args_option] = metadata[option] - else: - extra['Metadata'][option] = metadata[option] - - if module.params.get('permission'): - permissions = module.params['permission'] + extra = get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + if module.params.get("permission"): + permissions = module.params["permission"] if isinstance(permissions, str): - extra['ACL'] = permissions + extra["ACL"] = permissions elif isinstance(permissions, list): - extra['ACL'] = permissions[0] - - if 'ContentType' not in extra: - content_type = None - if src is not None: - content_type = mimetypes.guess_type(src)[0] - if content_type is None: - # s3 default content type - content_type = 'binary/octet-stream' - extra['ContentType'] = content_type - - if src is not None: - s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) + extra["ACL"] = permissions[0] + + if "ContentType" not in extra: + extra["ContentType"] = guess_content_type(src) + + if src: + s3.upload_file(aws_retry=True, Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) else: f = io.BytesIO(content) - s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, 
ExtraArgs=extra) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to complete PUT operation.") + s3.upload_fileobj(aws_retry=True, Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Unable to complete PUT operation.", e) + if not acl_disabled: - try: - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to set object ACL") + put_object_acl(module, s3, bucket, obj) # Tags tags, _changed = ensure_tags(s3, module, bucket, obj) - url = put_download_url(module, s3, bucket, obj, expiry) + url = put_download_url(s3, bucket, obj, expiry) module.exit_json(msg="PUT operation complete", url=url, tags=tags, changed=True) @@ -722,29 +783,37 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): # because the stream's dropped on the floor, we never pull the data and this is the # functional equivalent of calling get_head which still relying on the 'GET' permission if version: - s3.get_object(Bucket=bucket, Key=obj, VersionId=version) + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - s3.get_object(Bucket=bucket, Key=obj) - except is_boto3_error_code(['404', '403']) as e: + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj) + except is_boto3_error_code(["404", "403"]) as e: # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but # user does not have the s3:GetObject permission. 404 errors are handled by download_file(). - module.fail_json_aws(e, msg="Could not find the key %s." % obj) - except is_boto3_error_message('require AWS Signature Version 4'): # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") + except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except raise Sigv4Required() - except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not find the key %s." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not find the key %s." 
% obj) - - optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {} + except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Could not find the key {obj}.", e) + + optional_kwargs = {"ExtraArgs": {"VersionId": version}} if version else {} for x in range(0, retries + 1): try: - s3.download_file(bucket, obj, dest, **optional_kwargs) + s3.download_file(bucket, obj, dest, aws_retry=True, **optional_kwargs) module.exit_json(msg="GET operation complete", changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: # actually fail on last pass through the loop. if x >= retries: - module.fail_json_aws(e, msg="Failed while downloading %s." % obj) + raise S3ObjectFailure(f"Failed while downloading {obj}.", e) # otherwise, try again, this may be a transient timeout. except SSLError as e: # will ClientError catch SSLError? # actually fail on last pass through the loop. @@ -753,171 +822,124 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): # otherwise, try again, this may be a transient timeout. -def download_s3str(module, s3, bucket, obj, version=None, validate=True): +def download_s3str(module, s3, bucket, obj, version=None): if module.check_mode: module.exit_json(msg="GET operation skipped - running in check mode", changed=True) try: if version: - contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + contents = to_native( + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read() + ) else: - contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + contents = to_native(s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read()) module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except is_boto3_error_message('require AWS Signature Version 4'): + except is_boto3_error_message("require AWS Signature Version 4"): raise Sigv4Required() - except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." 
% obj) + except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg=f"Failed while getting contents of object {obj} as a string.", + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while getting contents of object {obj} as a string.", e) def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True): try: - url = s3.generate_presigned_url(ClientMethod='get_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - module.exit_json(msg="Download url:", url=url, tags=tags, expiry=expiry, changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while getting download url.") - - -def put_download_url(module, s3, bucket, obj, expiry): - try: - url = s3.generate_presigned_url(ClientMethod='put_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to generate presigned URL") - return url + url = s3.generate_presigned_url( + # aws_retry=True, + ClientMethod="get_object", + Params={"Bucket": bucket, "Key": obj}, + ExpiresIn=expiry, + ) + module.exit_json( + msg="Download url:", + url=url, + tags=tags, + expiry=expiry, + changed=changed, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed while getting download url.", e) -def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag): - if module.check_mode: - module.exit_json(msg="COPY operation skipped - running in check mode", changed=True) +def put_download_url(s3, bucket, obj, expiry): try: - params = {'Bucket': bucket, 'Key': obj} - bucketsrc = {'Bucket': module.params['copy_src'].get('bucket'), 'Key': module.params['copy_src'].get('object')} - version = None - if module.params['copy_src'].get('version_id') is not None: - version = module.params['copy_src'].get('version_id') - bucketsrc.update({'VersionId': version}) - if not key_check(module, s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version, validate=validate): - # Key does not exist in source bucket - module.exit_json(msg="Key %s does not exist in bucket %s." 
% (bucketsrc['Key'], bucketsrc['Bucket']), changed=False) - - s_etag = get_etag(s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version) - if s_etag == d_etag: - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - if not changed: - module.exit_json(msg="ETag from source and destination are the same", changed=False) - else: - module.exit_json(msg="tags successfully updated.", changed=changed, tags=tags) - else: - params.update({'CopySource': bucketsrc}) - if encrypt: - params['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - if metadata: - params['Metadata'] = {} - # determine object metadata and extra arguments - for option in metadata: - extra_args_option = option_in_extra_args(option) - if extra_args_option is not None: - params[extra_args_option] = metadata[option] - else: - params['Metadata'][option] = metadata[option] - s3.copy_object(**params) - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - module.exit_json(msg="Object copied from bucket %s to bucket %s." % (bucketsrc['Bucket'], bucket), tags=tags, changed=True) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while copying object %s from bucket %s." % (obj, module.params['copy_src'].get('Bucket'))) - - -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - + url = s3.generate_presigned_url( + # aws_retry=True, + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": obj}, + ExpiresIn=expiry, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Unable to generate presigned URL", e) -def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': - params['config'] = botocore.client.Config(signature_version='s3v4') - elif module.params['mode'] in ('get', 'getstr', 'geturl') and sig_4: - params['config'] = botocore.client.Config(signature_version='s3v4') - if 
module.params['dualstack']: - dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) - if 'config' in params: - params['config'] = params['config'].merge(dualconf) - else: - params['config'] = dualconf - return boto3_conn(**params) + return url -def get_current_object_tags_dict(s3, bucket, obj, version=None): +def get_current_object_tags_dict(module, s3, bucket, obj, version=None): try: if version: - current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj, VersionId=version).get('TagSet') + current_tags = s3.get_object_tagging(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version).get( + "TagSet" + ) else: - current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj).get('TagSet') - except is_boto3_error_code('NoSuchTagSet'): + current_tags = s3.get_object_tagging(aws_retry=True, Bucket=bucket, Key=obj).get("TagSet") + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("GetObjectTagging is not implemented by your storage provider.") return {} - except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except + except is_boto3_error_code(["NoSuchTagSet", "NoSuchTagSetError"]): return {} - return boto3_tag_list_to_ansible_dict(current_tags) -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_object_tagging(s3, bucket, obj, tags): - s3.put_object_tagging(Bucket=bucket, Key=obj, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + s3.put_object_tagging( + Bucket=bucket, + Key=obj, + Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)}, + ) -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_object_tagging(s3, bucket, obj): s3.delete_object_tagging(Bucket=bucket, Key=obj) def wait_tags_are_applied(module, s3, bucket, obj, expected_tags_dict, version=None): - for dummy in range(0, 12): + for _dummy in range(0, 12): try: - current_tags_dict = get_current_object_tags_dict(s3, bucket, obj, version) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get object tags.") + current_tags_dict = get_current_object_tags_dict(module, s3, bucket, obj, version) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to get object tags.", e) + if current_tags_dict != expected_tags_dict: time.sleep(5) else: return current_tags_dict - module.fail_json(msg="Object tags failed to apply in the expected time.", - requested_tags=expected_tags_dict, live_tags=current_tags_dict) + module.fail_json( + msg="Object tags failed to apply in the expected time.", + requested_tags=expected_tags_dict, + live_tags=current_tags_dict, + ) def ensure_tags(client, module, bucket, obj): @@ -926,362 +948,642 @@ def ensure_tags(client, module, bucket, obj): changed = False try: - current_tags_dict = get_current_object_tags_dict(client, bucket, obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("GetObjectTagging is not implemented by your storage provider. 
Set the permission parameters to the empty list to avoid this warning.") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get object tags.") + current_tags_dict = get_current_object_tags_dict(module, client, bucket, obj) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure("Failed to get object tags.", e) + + # Tags is None, we shouldn't touch anything + if tags is None: + return current_tags_dict, changed + + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_copy = current_tags_dict.copy() + current_copy.update(tags) + tags = current_copy + + # Nothing to change, we shouldn't touch anything + if current_tags_dict == tags: + return current_tags_dict, changed + + if tags: + try: + put_object_tagging(client, bucket, obj, tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to update object tags.", e) else: - if tags is not None: - if not purge_tags: - # Ensure existing tags that aren't updated by desired tags remain - current_copy = current_tags_dict.copy() - current_copy.update(tags) - tags = current_copy - if current_tags_dict != tags: - if tags: - try: - put_object_tagging(client, bucket, obj, tags) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to update object tags.") - else: - if purge_tags: - try: - delete_object_tagging(client, bucket, obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete object tags.") - current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags) - changed = True + try: + delete_object_tagging(client, bucket, obj) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to delete object tags.", e) + + current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags) + changed = True + return current_tags_dict, changed -def main(): - # Beware: this module uses an action plugin (plugins/action/s3_object.py) - # so that src parameter can be either in 'files/' lookup path on the - # controller, *or* on the remote host that the task is executed on. 
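The control flow that ensure_tags gained above reduces to a little pure dict logic; a hedged sketch of just the merge semantics (merge_tags is an illustrative name, not a helper in this module):

    def merge_tags(current, desired, purge):
        # Return (tags that should end up on the object, whether a write is needed).
        if desired is None:  # tags parameter omitted: leave everything untouched
            return current, False
        if not purge:  # purge_tags=false: layer desired tags over the existing ones
            desired = {**current, **desired}
        return desired, desired != current

    # merge_tags({"env": "dev", "team": "a"}, {"env": "prod"}, purge=False)
    # -> ({"env": "prod", "team": "a"}, True)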
+def get_binary_content(vars):
+ # the content will be uploaded as a byte string, so we must encode it first
+ bincontent = None
+ if vars.get("content"):
+ bincontent = vars["content"].encode("utf-8")
+ if vars.get("content_base64"):
+ bincontent = base64.standard_b64decode(vars["content_base64"])
+ return bincontent

- argument_spec = dict(
- bucket=dict(required=True),
- dest=dict(default=None, type='path'),
- encrypt=dict(default=True, type='bool'),
- encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
- expiry=dict(default=600, type='int', aliases=['expiration']),
- headers=dict(type='dict'),
- marker=dict(default=""),
- max_keys=dict(default=1000, type='int', no_log=False),
- metadata=dict(type='dict'),
- mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'], required=True),
- sig_v4=dict(default=True, type='bool'),
- object=dict(),
- permission=dict(type='list', elements='str', default=['private']),
- version=dict(default=None),
- overwrite=dict(aliases=['force'], default='different'),
- prefix=dict(default=""),
- retries=dict(aliases=['retry'], type='int', default=0),
- dualstack=dict(default=False, type='bool'),
- ceph=dict(default=False, type='bool', aliases=['rgw']),
- src=dict(type='path'),
- content=dict(),
- content_base64=dict(),
- ignore_nonexistent_bucket=dict(default=False, type='bool'),
- encryption_kms_key_id=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- copy_src=dict(type='dict', options=dict(bucket=dict(required=True), object=dict(required=True), version_id=dict())),
- validate_bucket_name=dict(type='bool', default=True),
- )
- required_if = [
- ['ceph', True, ['endpoint_url']],
- ['mode', 'put', ['object']],
- ['mode', 'get', ['dest', 'object']],
- ['mode', 'getstr', ['object']],
- ['mode', 'geturl', ['object']],
- ['mode', 'copy', ['copy_src']],
- ]

+def s3_object_do_get(module, connection, connection_v4, s3_vars):
+ if module.params.get("sig_v4"):
+ connection = connection_v4

- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=required_if,
- mutually_exclusive=[['content', 'content_base64', 'src']],
+ keyrtn = key_check(
+ module,
+ connection,
+ s3_vars["bucket"],
+ s3_vars["object"],
+ version=s3_vars["version"],
+ validate=s3_vars["validate"],
 )
+ if not keyrtn:
+ if s3_vars["version"]:
+ module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.")
+ module.fail_json(msg=f"Key {s3_vars['object']} does not exist.")
+ if s3_vars["dest"] and path_check(s3_vars["dest"]) and s3_vars["overwrite"] != "always":
+ if s3_vars["overwrite"] == "never":
+ module.exit_json(
+ msg="Local object already exists and overwrite is disabled.",
+ changed=False,
+ )
+ if s3_vars["overwrite"] == "different" and etag_compare(
+ module,
+ connection,
+ s3_vars["bucket"],
+ s3_vars["object"],
+ version=s3_vars["version"],
+ local_file=s3_vars["dest"],
+ ):
+ module.exit_json(
+ msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.",
+ changed=False,
+ )
+ if s3_vars["overwrite"] == "latest" and is_local_object_latest(
+ connection,
+ s3_vars["bucket"],
+ s3_vars["object"],
+ version=s3_vars["version"],
+ local_file=s3_vars["dest"],
+ ):
+ module.exit_json(
+ msg="Local object is latest, ignoring. 
Use overwrite=always parameter to force.", + changed=False, + ) - bucket = module.params.get('bucket') - encrypt = module.params.get('encrypt') - expiry = module.params.get('expiry') - dest = module.params.get('dest', '') - headers = module.params.get('headers') - marker = module.params.get('marker') - max_keys = module.params.get('max_keys') - metadata = module.params.get('metadata') - mode = module.params.get('mode') - obj = module.params.get('object') - version = module.params.get('version') - overwrite = module.params.get('overwrite') - sig_v4 = module.params.get('sig_v4') - prefix = module.params.get('prefix') - retries = module.params.get('retries') - endpoint_url = module.params.get('endpoint_url') - dualstack = module.params.get('dualstack') - ceph = module.params.get('ceph') - src = module.params.get('src') - content = module.params.get('content') - content_base64 = module.params.get('content_base64') - ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') - - object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] - bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] - - if module.params.get('validate_bucket_name'): - validate_bucket_name(module, bucket) - - if overwrite not in ['always', 'never', 'different', 'latest']: - if module.boolean(overwrite): - overwrite = 'always' - else: - overwrite = 'never' + try: + download_s3file( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["dest"], + s3_vars["retries"], + version=s3_vars["version"], + ) + except Sigv4Required: + download_s3file( + module, + connection_v4, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["dest"], + s3_vars["retries"], + version=s3_vars["version"], + ) - if overwrite == 'different' and not HAS_MD5: - module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + module.exit_json(failed=False) - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - if module.params.get('object'): - obj = module.params['object'] - # If there is a top level object, do nothing - if the object starts with / - # remove the leading character to maintain compatibility with Ansible versions < 2.4 - if obj.startswith('/'): - obj = obj[1:] +def s3_object_do_put(module, connection, connection_v4, s3_vars): + # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified + # these were separated into the variables bucket_acl and object_acl above - # Bucket deletion does not require obj. Prevents ambiguity with delobj. + # if encryption mode is set to aws:kms then we're forced to use s3v4, no point trying the + # original signature. - if obj and mode == "delete": - module.fail_json(msg='Parameter obj cannot be used with mode=delete') + if module.params.get("encryption_mode") == "aws:kms": + connection = connection_v4 - # allow eucarc environment variables to be used if ansible vars aren't set - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = os.environ['S3_URL'] - module.deprecate( - "Support for the 'S3_URL' environment variable has been " - "deprecated. 
We recommend using the 'endpoint_url' module " - "parameter. Alternatively, the 'AWS_URL' environment variable can " - "be used instead.", - date='2024-12-01', collection_name='amazon.aws', - ) + if s3_vars["src"] is not None and not path_check(s3_vars["src"]): + module.fail_json(msg=f"Local object \"{s3_vars['src']}\" does not exist for PUT operation") - if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: - module.fail_json(msg='dualstack only applies to AWS S3') + keyrtn = key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ) - # Look at endpoint_url and tweak connection settings - # if connecting to RGW, Walrus or fakes3 - if endpoint_url: - for key in ['validate_certs', 'security_token', 'profile_name']: - aws_connect_kwargs.pop(key, None) - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_v4) + # the content will be uploaded as a byte string, so we must encode it first + bincontent = get_binary_content(s3_vars) + + if keyrtn and s3_vars["overwrite"] != "always": + if s3_vars["overwrite"] == "never" or etag_compare( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + local_file=s3_vars["src"], + content=bincontent, + ): + # Return the download URL for the existing object and ensure tags are updated + tags, tags_update = ensure_tags(connection, module, s3_vars["bucket"], s3_vars["object"]) + get_download_url( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + tags, + changed=tags_update, + ) + + # only use valid object acls for the upload_s3file function + if not s3_vars["acl_disabled"]: + s3_vars["permission"] = s3_vars["object_acl"] + upload_s3file( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + s3_vars["metadata"], + s3_vars["encrypt"], + s3_vars["headers"], + src=s3_vars["src"], + content=bincontent, + acl_disabled=s3_vars["acl_disabled"], + ) + module.exit_json(failed=False) - validate = not ignore_nonexistent_bucket - # check if bucket exists, if yes, check if ACL is disabled - acl_disabled = False - exists = bucket_check(module, s3, bucket) - if exists: - try: - ownership_controls = s3.get_bucket_ownership_controls(Bucket=bucket)['OwnershipControls'] - if ownership_controls.get('Rules'): - object_ownership = ownership_controls['Rules'][0]['ObjectOwnership'] - if object_ownership == 'BucketOwnerEnforced': - acl_disabled = True - # if bucket ownership controls are not found - except botocore.exceptions.ClientError: - pass - - # separate types of ACLs - if not acl_disabled: - bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] - object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] - error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] - if error_acl: - module.fail_json(msg='Unknown permission specified: %s' % error_acl) - - # First, we check to see if the bucket exists, we get "bucket" returned. 
- bucketrtn = bucket_check(module, s3, bucket, validate=validate) - - if validate and mode not in ('create', 'put', 'delete', 'copy') and not bucketrtn: - module.fail_json(msg="Source bucket cannot be found.") - - if mode == 'get': - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn is False: - if version: - module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) - else: - module.fail_json(msg="Key %s does not exist." % obj) +def s3_object_do_delobj(module, connection, connection_v4, s3_vars): + # Delete an object from a bucket, not the entire bucket + if not s3_vars.get("object", None): + module.fail_json(msg="object parameter is required") + elif s3_vars["bucket"] and delete_key(module, connection, s3_vars["bucket"], s3_vars["object"]): + module.exit_json( + msg=f"Object deleted from bucket {s3_vars['bucket']}.", + changed=True, + ) + else: + module.fail_json(msg="Bucket parameter is required.") - if dest and path_check(dest) and overwrite != 'always': - if overwrite == 'never': - module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) - if overwrite == 'different' and etag_compare(module, s3, bucket, obj, version=version, local_file=dest): - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) - if overwrite == 'latest' and is_local_object_latest(module, s3, bucket, obj, version=version, local_file=dest): - module.exit_json(msg="Local object is latest, ignoreing. Use overwrite=always parameter to force.", changed=False) - try: - download_s3file(module, s3, bucket, obj, dest, retries, version=version) - except Sigv4Required: - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True) - download_s3file(module, s3, bucket, obj, dest, retries, version=version) +def s3_object_do_list(module, connection, connection_v4, s3_vars): + # If the bucket does not exist then bail out + keys = list_keys( + connection, + s3_vars["bucket"], + s3_vars["prefix"], + s3_vars["marker"], + s3_vars["max_keys"], + ) - if mode == 'put': + module.exit_json(msg="LIST operation complete", s3_keys=keys) - # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - if content is None and content_base64 is None and src is None: - module.fail_json(msg='Either content, content_base64 or src must be specified for PUT operations') - if src is not None and not path_check(src): - module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (src)) +def s3_object_do_create(module, connection, connection_v4, s3_vars): + # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified + # these were separated above into the variables bucket_acl and object_acl - keyrtn = None - if bucketrtn: - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - else: - # If the bucket doesn't exist we should create it. 
- # only use valid bucket acls for create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - - # the content will be uploaded as a byte string, so we must encode it first - bincontent = None - if content is not None: - bincontent = content.encode('utf-8') - if content_base64 is not None: - bincontent = base64.standard_b64decode(content_base64) - - if keyrtn and overwrite != 'always': - if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent): - # Return the download URL for the existing object and ensure tags are updated - tags, tags_update = ensure_tags(s3, module, bucket, obj) - get_download_url(module, s3, bucket, obj, expiry, tags, changed=tags_update) - - # only use valid object acls for the upload_s3file function - if not acl_disabled: - module.params['permission'] = object_acl - upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent, acl_disabled=acl_disabled) + if not s3_vars["object"].endswith("/"): + s3_vars["object"] = s3_vars["object"] + "/" - # Delete an object from a bucket, not the entire bucket - if mode == 'delobj': - if obj is None: - module.fail_json(msg="object parameter is required") - if bucket: - deletertn = delete_key(module, s3, bucket, obj) - if deletertn is True: - module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True) + if key_check(module, connection, s3_vars["bucket"], s3_vars["object"]): + module.exit_json( + msg=f"Bucket {s3_vars['bucket']} and key {s3_vars['object']} already exists.", + changed=False, + ) + if not s3_vars["acl_disabled"]: + # setting valid object acls for the create_dirkey function + s3_vars["permission"] = s3_vars["object_acl"] + create_dirkey( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["expiry"], + ) + + +def s3_object_do_geturl(module, connection, connection_v4, s3_vars): + if module.params.get("sig_v4"): + connection = connection_v4 + + if key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ): + tags = get_current_object_tags_dict( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + get_download_url( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + tags, + ) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") + + +def s3_object_do_getstr(module, connection, connection_v4, s3_vars): + if module.params.get("sig_v4"): + connection = connection_v4 + + if s3_vars["bucket"] and s3_vars["object"]: + if key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ): + try: + download_s3str( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + except Sigv4Required: + download_s3str( + module, + connection_v4, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + elif s3_vars["version"]: + module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.") else: - module.fail_json(msg="Bucket parameter is required.") - - # Delete an entire bucket, including all objects in the bucket - if mode == 'delete': - if bucket: - deletertn = delete_bucket(module, s3, bucket) - if deletertn is True: - module.exit_json(msg="Bucket %s and all keys 
have been deleted." % bucket, changed=True) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") + + +def check_object_tags(module, connection, bucket, obj): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + diff = False + if tags: + current_tags_dict = get_current_object_tags_dict(module, connection, bucket, obj) + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_tags_dict.update(tags) + diff = current_tags_dict != tags + return diff + + +def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None): + try: + params = {"Bucket": bucket, "Key": obj} + if not key_check(module, s3, src_bucket, src_obj, version=versionId, validate=validate): + # Key does not exist in source bucket + module.exit_json( + msg=f"Key {src_obj} does not exist in bucket {src_bucket}.", + changed=False, + ) + + s_etag = get_etag(s3, src_bucket, src_obj, version=versionId) + d_etag = get_etag(s3, bucket, obj) + if s_etag == d_etag: + if module.check_mode: + changed = check_object_tags(module, s3, bucket, obj) + result = {} + if changed: + result.update({"msg": "Would have updated object tags if not running in check mode."}) + return changed, result + + # Ensure tags + tags, changed = ensure_tags(s3, module, bucket, obj) + result = {"msg": "ETag from source and destination are the same"} + if changed: + result = {"msg": "tags successfully updated.", "tags": tags} + return changed, result + elif module.check_mode: + return True, {"msg": "ETag from source and destination differ"} else: - module.fail_json(msg="Bucket parameter is required.") + changed = True + bucketsrc = { + "Bucket": src_bucket, + "Key": src_obj, + } + if versionId: + bucketsrc.update({"VersionId": versionId}) + params.update({"CopySource": bucketsrc}) + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + ) + s3.copy_object(aws_retry=True, **params) + put_object_acl(module, s3, bucket, obj) + # Tags + tags, tags_updated = ensure_tags(s3, module, bucket, obj) + msg = f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}." + return changed, {"msg": msg, "tags": tags} + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('bucket')}.", + e, + ) - # Support for listing a set of keys - if mode == 'list': - # If the bucket does not exist then bail out - if not bucketrtn: - module.fail_json(msg="Target bucket (%s) cannot be found" % bucket) +def s3_object_do_copy(module, connection, connection_v4, s3_vars): + copy_src = module.params.get("copy_src") + if not copy_src.get("object") and s3_vars["object"]: + module.fail_json( + msg="A destination object was specified while trying to copy all the objects from the source bucket." 
+ ) + src_bucket = copy_src.get("bucket") + if not copy_src.get("object"): + # copy recursively object(s) from source bucket to destination bucket + # list all the objects from the source bucket + keys = list_keys(connection, src_bucket, copy_src.get("prefix")) + if len(keys) == 0: + module.exit_json(msg=f"No object found to be copied from source bucket {src_bucket}.") + + changed = False + number_keys_updated = 0 + for key in keys: + updated, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + key, + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + key, + versionId=copy_src.get("version_id"), + ) + changed |= updated + number_keys_updated += 1 if updated else 0 + + msg = f"object(s) from buckets '{src_bucket}' and '{s3_vars['bucket']}' are the same." + if number_keys_updated: + msg = f"{number_keys_updated} copied into bucket '{s3_vars['bucket']}'" + module.exit_json(changed=changed, msg=msg) + else: + # copy single object from source bucket into destination bucket + changed, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + copy_src.get("object"), + versionId=copy_src.get("version_id"), + ) + module.exit_json(changed=changed, **result) - list_keys(module, s3, bucket, prefix, marker, max_keys) - # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. - # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. - if mode == 'create': +def populate_params(module): + # Copy the parameters dict, we shouldn't be directly modifying it. + variable_dict = copy.deepcopy(module.params) - # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified - # these were separated above into the variables bucket_acl and object_acl + if variable_dict["validate_bucket_name"]: + validate_bucket_name(variable_dict["bucket"]) - if bucket and not obj: - if bucketrtn: - module.exit_json(msg="Bucket already exists.", changed=False) - else: - # only use valid bucket acls when creating the bucket - module.params['permission'] = bucket_acl - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) - if bucket and obj: - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucketrtn: - if key_check(module, s3, bucket, dirobj): - module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) - else: - # setting valid object acls for the create_dirkey function - module.params['permission'] = object_acl - create_dirkey(module, s3, bucket, dirobj, encrypt, expiry) - else: - # only use valid bucket acls for the create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - # only use valid object acls for the create_dirkey function - module.params['permission'] = object_acl - create_dirkey(module, s3, bucket, dirobj, encrypt, expiry) - - # Support for grabbing the time-expired URL for an object in S3/Walrus. 
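
The recursive branch of s3_object_do_copy() above folds per-key results from copy_object_to_bucket() into a single changed flag and an updated-key count. A rough sketch of just that aggregation, with copy_one standing in for the real call (assumed to return a (changed, result) tuple):

def copy_all(keys, copy_one):
    # Sketch of the aggregation in s3_object_do_copy(): OR the per-key
    # changed flags together and count how many keys were actually copied.
    changed = False
    updated = 0
    for key in keys:
        key_changed, _result = copy_one(key)
        changed |= key_changed
        updated += 1 if key_changed else 0
    return changed, updated

assert copy_all(["a", "b"], lambda key: (key == "a", {})) == (True, 1)
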
- if mode == 'geturl': - if not bucket and not obj: - module.fail_json(msg="Bucket and Object parameters must be set") - - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn: - tags = get_current_object_tags_dict(s3, bucket, obj, version=version) - get_download_url(module, s3, bucket, obj, expiry, tags) + if variable_dict.get("overwrite") == "different" and not HAS_MD5: + module.fail_json(msg="overwrite=different is unavailable: ETag calculation requires MD5 support") + + if variable_dict.get("overwrite") not in [ + "always", + "never", + "different", + "latest", + ]: + if module.boolean(variable_dict["overwrite"]): + variable_dict["overwrite"] = "always" else: - module.fail_json(msg="Key %s does not exist." % obj) - - if mode == 'getstr': - if bucket and obj: - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn: - try: - download_s3str(module, s3, bucket, obj, version=version) - except Sigv4Required: - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True) - download_s3str(module, s3, bucket, obj, version=version) - elif version is not None: - module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) - else: - module.fail_json(msg="Key %s does not exist." % obj) - - if mode == 'copy': - # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - d_etag = None - if bucketrtn: - d_etag = get_etag(s3, bucket, obj) + variable_dict["overwrite"] = "never" + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. + if variable_dict["object"]: + if variable_dict.get("mode") == "delete": + module.fail_json(msg="Parameter object cannot be used with mode=delete") + obj = variable_dict["object"] + # If the object starts with / remove the leading character + if obj.startswith("/"): + obj = obj[1:] + variable_dict["object"] = obj + module.deprecate( + "Support for passing object key names with a leading '/' has been deprecated.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + variable_dict["validate"] = not variable_dict["ignore_nonexistent_bucket"] + variable_dict["acl_disabled"] = False + + return variable_dict + + +def validate_bucket(module, s3, var_dict): + bucket_check(module, s3, var_dict["bucket"], validate=var_dict["validate"]) + + try: + ownership_controls = s3.get_bucket_ownership_controls(aws_retry=True, Bucket=var_dict["bucket"])[ + "OwnershipControls" + ] + if ownership_controls.get("Rules"): + object_ownership = ownership_controls["Rules"][0]["ObjectOwnership"] + if object_ownership == "BucketOwnerEnforced": + var_dict["acl_disabled"] = True + # if bucket ownership controls are not found + except botocore.exceptions.ClientError: + pass + + if not var_dict["acl_disabled"]: + var_dict["object_acl"] = list(var_dict.get("permission")) + + return var_dict + + +def main(): + # Beware: this module uses an action plugin (plugins/action/s3_object.py) + # so that src parameter can be either in 'files/' lookup path on the + # controller, *or* on the remote host that the task is executed on. 
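
populate_params() above keeps backwards compatibility for boolean-ish overwrite values; the mapping is small enough to show on its own. A sketch, with plain bool() standing in for Ansible's module.boolean():

def normalize_overwrite(value, to_bool=bool):
    # Sketch of the legacy fallback in populate_params(): any value outside
    # the four supported modes collapses to always/never by truthiness.
    if value in ("always", "never", "different", "latest"):
        return value
    return "always" if to_bool(value) else "never"

assert normalize_overwrite("latest") == "latest"
assert normalize_overwrite(True) == "always"
assert normalize_overwrite(0) == "never"
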
+ + valid_modes = ["get", "put", "create", "geturl", "getstr", "delobj", "list", "copy"] + valid_acls = [ + "private", + "public-read", + "public-read-write", + "aws-exec-read", + "authenticated-read", + "bucket-owner-read", + "bucket-owner-full-control", + ] + + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type="path"), + encrypt=dict(default=True, type="bool"), + encryption_mode=dict(choices=["AES256", "aws:kms"], default="AES256"), + expiry=dict(default=600, type="int", aliases=["expiration"]), + headers=dict(type="dict"), + marker=dict(default=""), + max_keys=dict(default=1000, type="int", no_log=False), + metadata=dict(type="dict"), + mode=dict(choices=valid_modes, required=True), + sig_v4=dict(default=True, type="bool"), + object=dict(), + permission=dict(type="list", elements="str", default=["private"], choices=valid_acls), + version=dict(default=None), + overwrite=dict(aliases=["force"], default="different"), + prefix=dict(default=""), + retries=dict(aliases=["retry"], type="int", default=0), + dualstack=dict(default=False, type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), + src=dict(type="path"), + content=dict(), + content_base64=dict(), + ignore_nonexistent_bucket=dict(default=False, type="bool"), + encryption_kms_key_id=dict(), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_src=dict( + type="dict", + options=dict( + bucket=dict(required=True), + object=dict(), + prefix=dict(default=""), + version_id=dict(), + ), + ), + validate_bucket_name=dict(type="bool", default=True), + ) + + required_if = [ + ["ceph", True, ["endpoint_url"]], + ["mode", "put", ["object"]], + ["mode", "put", ["content", "content_base64", "src"], True], + ["mode", "create", ["object"]], + ["mode", "get", ["dest", "object"]], + ["mode", "getstr", ["object"]], + ["mode", "geturl", ["object"]], + ["mode", "copy", ["copy_src"]], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + mutually_exclusive=[["content", "content_base64", "src"]], + ) + + endpoint_url = module.params.get("endpoint_url") + dualstack = module.params.get("dualstack") + + if dualstack and endpoint_url: + module.deprecate( + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), + date="2024-12-01", + collection_name="amazon.aws", + ) + if "amazonaws.com" not in endpoint_url: + module.fail_json(msg="dualstack only applies to AWS S3") + + if module.params.get("overwrite") not in ("always", "never", "different", "latest"): + module.deprecate( + ( + "Support for passing values of 'overwrite' other than 'always', 'never', " + "'different' or 'latest', has been deprecated." 
+ ), + date="2024-12-01", + collection_name="amazon.aws", + ) + + extra_params = s3_extra_params(module.params, sigv4=False) + extra_params_v4 = s3_extra_params(module.params, sigv4=True) + retry_decorator = AWSRetry.jittered_backoff() + try: + s3 = module.client("s3", retry_decorator=retry_decorator, **extra_params) + s3_v4 = module.client("s3", retry_decorator=retry_decorator, **extra_params_v4) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + s3_object_params = populate_params(module) + s3_object_params.update(validate_bucket(module, s3, s3_object_params)) + + func_mapping = { + "get": s3_object_do_get, + "put": s3_object_do_put, + "delobj": s3_object_do_delobj, + "list": s3_object_do_list, + "create": s3_object_do_create, + "geturl": s3_object_do_geturl, + "getstr": s3_object_do_getstr, + "copy": s3_object_do_copy, + } + func = func_mapping[s3_object_params["mode"]] + try: + func(module, s3, s3_v4, s3_object_params) + except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Invalid endpoint provided") + except S3ObjectFailure as e: + if e.original_e: + module.fail_json_aws(e.original_e, e.message) else: - # If the bucket doesn't exist we should create it. - # only use valid bucket acls for create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - # only use valid object acls for the copy operation - module.params['permission'] = object_acl - copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag) + module.fail_json(e.message) module.exit_json(failed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py index 88e66dc4f..65bd5e328 100644 --- a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_object_info version_added: 5.0.0 @@ -36,6 +34,9 @@ options: dualstack: description: - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Support for passing I(dualstack) and I(endpoint_url) at the same time has been deprecated, + the dualstack endpoints are automatically configured using the configured I(region). + Support will be removed in a release after 2024-12-01. type: bool default: false ceph: @@ -86,7 +87,6 @@ options: object_attributes: description: - Retreive S3 object attributes. - - Requires minimum botocore version 1.24.7. required: false type: bool default: false @@ -102,13 +102,12 @@ notes: deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable. 
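
main() above now builds both S3 clients up front instead of reconnecting when Sigv4Required is raised. In plain boto3 the difference between the two is essentially the pinned signature version; a simplified sketch using boto3/botocore directly rather than the collection's s3_extra_params/module.client wrappers:

import boto3
from botocore.client import Config

def make_s3_clients(region=None):
    # Sketch: one default client plus one pinned to SigV4, which aws:kms
    # encryption and some presigned-URL cases require.
    default = boto3.client("s3", region_name=region)
    sigv4 = boto3.client("s3", region_name=region, config=Config(signature_version="s3v4"))
    return default, sigv4
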
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Retrieve a list of objects in S3 bucket @@ -149,10 +148,9 @@ EXAMPLES = r''' attributes_list: - ETag - ObjectSize +""" -''' - -RETURN = r''' +RETURN = r""" s3_keys: description: List of object keys. returned: when only I(bucket_name) is specified and I(object_name), I(object_details) are not specified. @@ -431,31 +429,26 @@ object_info: returned: if it was upload with the object. type: str sample: "xxxxxxxxxxxx" -''' - -import os +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import to_text -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def describe_s3_object_acl(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_acl_info = {} @@ -466,7 +459,7 @@ def describe_s3_object_acl(connection, bucket_name, object_name): if len(object_acl_info) != 0: # Remove ResponseMetadata from object_acl_info, convert to snake_case - del object_acl_info['ResponseMetadata'] + del object_acl_info["ResponseMetadata"] object_acl_info = camel_dict_to_snake_dict(object_acl_info) return object_acl_info @@ -474,20 +467,20 @@ def describe_s3_object_acl(connection, bucket_name, object_name): def describe_s3_object_attributes(connection, module, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name - params['ObjectAttributes'] = module.params.get('object_details')['attributes_list'] + params["Bucket"] = bucket_name + params["Key"] = object_name + params["ObjectAttributes"] = module.params.get("object_details")["attributes_list"] object_attributes_info = {} try: object_attributes_info = connection.get_object_attributes(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - object_attributes_info['msg'] = 'Object attributes not found' + object_attributes_info["msg"] = "Object attributes not found" - if len(object_attributes_info) != 0 and 'msg' not in 
object_attributes_info.keys(): + if len(object_attributes_info) != 0 and "msg" not in object_attributes_info.keys(): # Remove ResponseMetadata from object_attributes_info, convert to snake_case - del object_attributes_info['ResponseMetadata'] + del object_attributes_info["ResponseMetadata"] object_attributes_info = camel_dict_to_snake_dict(object_attributes_info) return object_attributes_info @@ -495,8 +488,8 @@ def describe_s3_object_attributes(connection, module, bucket_name, object_name): def describe_s3_object_legal_hold(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_legal_hold_info = {} @@ -507,7 +500,7 @@ def describe_s3_object_legal_hold(connection, bucket_name, object_name): if len(object_legal_hold_info) != 0: # Remove ResponseMetadata from object_legal_hold_info, convert to snake_case - del object_legal_hold_info['ResponseMetadata'] + del object_legal_hold_info["ResponseMetadata"] object_legal_hold_info = camel_dict_to_snake_dict(object_legal_hold_info) return object_legal_hold_info @@ -515,7 +508,7 @@ def describe_s3_object_legal_hold(connection, bucket_name, object_name): def describe_s3_object_lock_configuration(connection, bucket_name): params = {} - params['Bucket'] = bucket_name + params["Bucket"] = bucket_name object_legal_lock_configuration_info = {} @@ -526,7 +519,7 @@ def describe_s3_object_lock_configuration(connection, bucket_name): if len(object_legal_lock_configuration_info) != 0: # Remove ResponseMetadata from object_legal_lock_configuration_info, convert to snake_case - del object_legal_lock_configuration_info['ResponseMetadata'] + del object_legal_lock_configuration_info["ResponseMetadata"] object_legal_lock_configuration_info = camel_dict_to_snake_dict(object_legal_lock_configuration_info) return object_legal_lock_configuration_info @@ -534,8 +527,8 @@ def describe_s3_object_lock_configuration(connection, bucket_name): def describe_s3_object_retention(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_retention_info = {} @@ -546,7 +539,7 @@ def describe_s3_object_retention(connection, bucket_name, object_name): if len(object_retention_info) != 0: # Remove ResponseMetadata from object_retention_info, convert to snake_case - del object_retention_info['ResponseMetadata'] + del object_retention_info["ResponseMetadata"] object_retention_info = camel_dict_to_snake_dict(object_retention_info) return object_retention_info @@ -554,8 +547,8 @@ def describe_s3_object_retention(connection, bucket_name, object_name): def describe_s3_object_tagging(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_tagging_info = {} @@ -566,41 +559,40 @@ def describe_s3_object_tagging(connection, bucket_name, object_name): if len(object_tagging_info) != 0: # Remove ResponseMetadata from object_tagging_info, convert to snake_case - del object_tagging_info['ResponseMetadata'] - object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info['TagSet']) + del object_tagging_info["ResponseMetadata"] + object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info["TagSet"]) return object_tagging_info def get_object_details(connection, module, bucket_name, object_name, 
requested_facts): - all_facts = {} # Remove non-requested facts requested_facts = {fact: value for fact, value in requested_facts.items() if value is True} - all_facts['object_data'] = get_object(connection, bucket_name, object_name)['object_data'] + all_facts["object_data"] = get_object(connection, bucket_name, object_name)["object_data"] # Below APIs do not return object_name, need to add it manually - all_facts['object_name'] = object_name + all_facts["object_name"] = object_name for key in requested_facts: - if key == 'object_acl': + if key == "object_acl": all_facts[key] = {} all_facts[key] = describe_s3_object_acl(connection, bucket_name, object_name) - elif key == 'object_attributes': + elif key == "object_attributes": all_facts[key] = {} all_facts[key] = describe_s3_object_attributes(connection, module, bucket_name, object_name) - elif key == 'object_legal_hold': + elif key == "object_legal_hold": all_facts[key] = {} all_facts[key] = describe_s3_object_legal_hold(connection, bucket_name, object_name) - elif key == 'object_lock_configuration': + elif key == "object_lock_configuration": all_facts[key] = {} all_facts[key] = describe_s3_object_lock_configuration(connection, bucket_name) - elif key == 'object_retention': + elif key == "object_retention": all_facts[key] = {} all_facts[key] = describe_s3_object_retention(connection, bucket_name, object_name) - elif key == 'object_tagging': + elif key == "object_tagging": all_facts[key] = {} all_facts[key] = describe_s3_object_tagging(connection, bucket_name, object_name) @@ -609,8 +601,8 @@ def get_object_details(connection, module, bucket_name, object_name, requested_f def get_object(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name result = {} object_info = {} @@ -622,23 +614,23 @@ def get_object(connection, bucket_name, object_name): if len(object_info) != 0: # Remove ResponseMetadata from object_info, convert to snake_case - del object_info['ResponseMetadata'] + del object_info["ResponseMetadata"] object_info = camel_dict_to_snake_dict(object_info) - result['object_data'] = object_info + result["object_data"] = object_info return result @AWSRetry.jittered_backoff(retries=10) def _list_bucket_objects(connection, **params): - paginator = connection.get_paginator('list_objects') + paginator = connection.get_paginator("list_objects") return paginator.paginate(**params).build_full_result() def list_bucket_objects(connection, module, bucket_name): params = {} - params['Bucket'] = bucket_name + params["Bucket"] = bucket_name result = [] list_objects_response = {} @@ -646,96 +638,63 @@ def list_bucket_objects(connection, module, bucket_name): try: list_objects_response = _list_bucket_objects(connection, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to list bucket objects.') + module.fail_json_aws(e, msg="Failed to list bucket objects.") if len(list_objects_response) != 0: # convert to snake_case - for response_list_item in list_objects_response['Contents']: - result.append(response_list_item['Key']) + for response_list_item in list_objects_response.get("Contents", []): + result.append(response_list_item["Key"]) return result -def bucket_check(connection, module, bucket_name,): +def bucket_check( + connection, + module, + bucket_name, +): try: connection.head_bucket(Bucket=bucket_name) - except is_boto3_error_code(['404', '403']) as e: 
- module.fail_json_aws(e, msg="The bucket %s does not exist or is missing access permissions." % bucket_name) + except is_boto3_error_code(["404", "403"]) as e: + module.fail_json_aws(e, msg=f"The bucket {bucket_name} does not exist or is missing access permissions.") def object_check(connection, module, bucket_name, object_name): try: connection.head_object(Bucket=bucket_name, Key=object_name) - except is_boto3_error_code(['404', '403']) as e: - module.fail_json_aws(e, msg="The object %s does not exist or is missing access permissions." % object_name) - - -# To get S3 connection, in case of dealing with ceph, dualstack, etc. -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - - -def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': - params['config'] = botocore.client.Config(signature_version='s3v4') - elif module.params['mode'] in ('get', 'getstr') and sig_4: - params['config'] = botocore.client.Config(signature_version='s3v4') - if module.params['dualstack']: - dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) - if 'config' in params: - params['config'] = params['config'].merge(dualconf) - else: - params['config'] = dualconf - return boto3_conn(**params) + except is_boto3_error_code(["404", "403"]) as e: + module.fail_json_aws(e, msg=f"The object {object_name} does not exist or is missing access permissions.") def main(): - argument_spec = dict( - object_details=dict(type='dict', options=dict( - object_acl=dict(type='bool', default=False), - object_legal_hold=dict(type='bool', default=False), - object_lock_configuration=dict(type='bool', default=False), - object_retention=dict(type='bool', default=False), - object_tagging=dict(type='bool', default=False), - object_attributes=dict(type='bool', default=False), - attributes_list=dict(type='list', elements='str', choices=['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize'])), + object_details=dict( + type="dict", + options=dict( + object_acl=dict(type="bool", default=False), + object_legal_hold=dict(type="bool", default=False), + object_lock_configuration=dict(type="bool", default=False), + object_retention=dict(type="bool", default=False), + object_tagging=dict(type="bool", default=False), + object_attributes=dict(type="bool", default=False), + attributes_list=dict( + type="list", + elements="str", + choices=["ETag", "Checksum", "ObjectParts", "StorageClass", "ObjectSize"], + ), + ), required_if=[ ("object_attributes", True, 
["attributes_list"]), - ] + ], ), - bucket_name=dict(required=True, type='str'), - object_name=dict(type='str'), - dualstack=dict(default='no', type='bool'), - ceph=dict(default=False, type='bool', aliases=['rgw']), + bucket_name=dict(required=True, type="str"), + object_name=dict(type="str"), + dualstack=dict(default=False, type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), ) required_if = [ - ['ceph', True, ['endpoint_url']], + ["ceph", True, ["endpoint_url"]], ] module = AnsibleAWSModule( @@ -744,45 +703,31 @@ def main(): required_if=required_if, ) - bucket_name = module.params.get('bucket_name') - object_name = module.params.get('object_name') - requested_object_details = module.params.get('object_details') - endpoint_url = module.params.get('endpoint_url') - dualstack = module.params.get('dualstack') - ceph = module.params.get('ceph') + bucket_name = module.params.get("bucket_name") + object_name = module.params.get("object_name") + requested_object_details = module.params.get("object_details") + endpoint_url = module.params.get("endpoint_url") + dualstack = module.params.get("dualstack") - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = os.environ['S3_URL'] + if dualstack and endpoint_url: module.deprecate( - "Support for the 'S3_URL' environment variable has been " - "deprecated. We recommend using the 'endpoint_url' module " - "parameter. Alternatively, the 'AWS_URL' environment variable can " - "be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), + date="2024-12-01", + collection_name="amazon.aws", ) - - if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: - module.fail_json(msg='dualstack only applies to AWS S3') + if "amazonaws.com" not in endpoint_url: + module.fail_json(msg="dualstack only applies to AWS S3") result = [] - - if endpoint_url: - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - for key in ['validate_certs', 'security_token', 'profile_name']: - aws_connect_kwargs.pop(key, None) - connection = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url) - else: - try: - connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + extra_params = s3_extra_params(module.params) + retry_decorator = AWSRetry.jittered_backoff() + try: + connection = module.client("s3", retry_decorator=retry_decorator, **extra_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") # check if specified bucket exists bucket_check(connection, module, bucket_name) @@ -790,9 +735,6 @@ def main(): if object_name: object_check(connection, module, bucket_name, object_name) - if requested_object_details and requested_object_details['object_attributes']: - module.require_botocore_at_least('1.24.7', reason='required for s3.get_object_attributes') - if requested_object_details: if object_name: object_details = 
get_object_details(connection, module, bucket_name, object_name, requested_object_details) @@ -814,5 +756,5 @@ def main(): module.exit_json(object_info=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py b/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py new file mode 100644 index 000000000..9b5f7418e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: sts_assume_role +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Assume a role using AWS Security Token Service and obtain temporary credentials +description: + - Assume a role using AWS Security Token Service and obtain temporary credentials. +author: + - Boris Ekelchik (@bekelchik) + - Marek Piatek (@piontas) +options: + role_arn: + description: + - The Amazon Resource Name (ARN) of the role that the caller is + assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). + required: true + type: str + role_session_name: + description: + - Name of the role's session - will be used by CloudTrail. + required: true + type: str + policy: + description: + - Supplemental policy to use in addition to assumed role's policies. + type: str + duration_seconds: + description: + - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours). + - The max depends on the IAM role's sessions duration setting. + - By default, the value is set to 3600 seconds. + type: int + external_id: + description: + - A unique identifier that is used by third parties to assume a role in their customers' accounts. + type: str + mfa_serial_number: + description: + - The identification number of the MFA device that is associated with the user who is making the AssumeRole call. + type: str + mfa_token: + description: + - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. + type: str +notes: + - In order to use the assumed role in a following playbook task you must pass the I(access_key), + I(secret_key) and I(session_token) parameters to modules that should use the assumed credentials. 
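
The note above is the key usage detail; under the hood the module is a thin wrapper over STS AssumeRole. A minimal boto3 sketch of the call and the response fields the module flattens (the role ARN and session name below are placeholders):

import boto3

def assume(role_arn="arn:aws:iam::123456789012:role/someRole", session_name="example"):
    # Sketch: AssumeRole returns temporary credentials plus the assumed-role
    # user; the module exposes these as sts_creds / sts_user in snake_case.
    resp = boto3.client("sts").assume_role(RoleArn=role_arn, RoleSessionName=session_name)
    creds = resp["Credentials"]      # AccessKeyId, SecretAccessKey, SessionToken, Expiration
    user = resp["AssumedRoleUser"]   # AssumedRoleId, Arn
    return creds, user
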
+extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +sts_creds: + description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token + returned: always + type: dict + sample: + access_key: XXXXXXXXXXXXXXXXXXXX + expiration: '2017-11-11T11:11:11+00:00' + secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +sts_user: + description: The Amazon Resource Name (ARN) and the assumed role ID + returned: always + type: dict + sample: + assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob + arn: ARO123EXAMPLE123:Bob +changed: + description: True if obtaining the credentials succeeds + type: bool + returned: always +""" + +EXAMPLES = r""" +# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +- amazon.aws.sts_assume_role: + access_key: AKIA1EXAMPLE1EXAMPLE + secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" + register: assumed_role + +# Use the assumed role above to tag an instance in account 123456789012 +- amazon.aws.ec2_tag: + access_key: "{{ assumed_role.sts_creds.access_key }}" + secret_key: "{{ assumed_role.sts_creds.secret_key }}" + session_token: "{{ assumed_role.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value +""" + +try: + from botocore.exceptions import ClientError + from botocore.exceptions import ParamValidationError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def _parse_response(response): + credentials = response.get("Credentials", {}) + user = response.get("AssumedRoleUser", {}) + + sts_cred = { + "access_key": credentials.get("AccessKeyId"), + "secret_key": credentials.get("SecretAccessKey"), + "session_token": credentials.get("SessionToken"), + "expiration": credentials.get("Expiration"), + } + sts_user = camel_dict_to_snake_dict(user) + return sts_cred, sts_user + + +def assume_role_policy(connection, module): + params = { + "RoleArn": module.params.get("role_arn"), + "RoleSessionName": module.params.get("role_session_name"), + "Policy": module.params.get("policy"), + "DurationSeconds": module.params.get("duration_seconds"), + "ExternalId": module.params.get("external_id"), + "SerialNumber": module.params.get("mfa_serial_number"), + "TokenCode": module.params.get("mfa_token"), + } + changed = False + + kwargs = dict((k, v) for k, v in params.items() if v is not None) + + try: + response = connection.assume_role(**kwargs) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json_aws(e) + + sts_cred, sts_user = _parse_response(response) + module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user) + + +def main(): + argument_spec = dict( + role_arn=dict(required=True), + role_session_name=dict(required=True), + duration_seconds=dict(required=False, default=None, type="int"), + external_id=dict(required=False, default=None), + policy=dict(required=False, default=None), + mfa_serial_number=dict(required=False, default=None), + mfa_token=dict(required=False, default=None, no_log=True), + ) + + module = 
AnsibleAWSModule(argument_spec=argument_spec) + + connection = module.client("sts") + + assume_role_policy(connection, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/base.py b/ansible_collections/amazon/aws/plugins/plugin_utils/base.py new file mode 100644 index 000000000..3c9066209 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/base.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.errors import AnsibleError +from ansible.module_utils.basic import to_native +from ansible.utils.display import Display + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import check_sdk_version_supported +from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import boto3_conn +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import get_aws_region + +display = Display() + + +class AWSPluginBase: + def warn(self, message): + display.warning(message) + + def debug(self, message): + display.debug(message) + + # Should be overridden with the plugin-type specific exception + def _do_fail(self, message): + raise AnsibleError(message) + + # We don't know what the correct exception is to raise, so the actual "raise" is handled by + # _do_fail() + def fail_aws(self, message, exception=None): + if not exception: + self._do_fail(to_native(message)) + self._do_fail(f"{message}: {to_native(exception)}") + + def client(self, service, retry_decorator=None, **extra_params): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + conn = boto3_conn(self, conn_type="client", resource=service, **kw_args) + return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator) + + def resource(self, service, **extra_params): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + return boto3_conn(self, conn_type="resource", resource=service, **kw_args) + + @property + def region(self): + return get_aws_region(self) + + def require_aws_sdk(self, botocore_version=None, boto3_version=None): + return check_sdk_version_supported( + botocore_version=botocore_version, boto3_version=boto3_version, warn=self.warn + ) diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py b/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py new file mode 100644 index 000000000..2fe2ca0eb --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.module_utils.basic import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _aws_region +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + + +def boto3_conn(plugin, conn_type=None, resource=None, region=None, endpoint=None, **params): + """ + Builds a boto3 resource/client connection cleanly wrapping the most common failures. + Handles: + ValueError, + botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError, + botocore.exceptions.NoRegionError + """ + + try: + return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) + except ValueError as e: + plugin.fail_aws(f"Couldn't connect to AWS: {to_native(e)}") + except ( + botocore.exceptions.ProfileNotFound, + botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, + botocore.exceptions.ConfigParseError, + ) as e: + plugin.fail_aws(to_native(e)) + except botocore.exceptions.NoRegionError: + # ansible_name is added in 2.14 + if hasattr(plugin, "ansible_name"): + plugin.fail_aws( + f"The {plugin.ansible_name} plugin requires a region and none was found in configuration, " + "environment variables or module parameters" + ) + plugin.fail_aws( + "A region is required and none was found in configuration, environment variables or module parameters" + ) + + +def get_aws_connection_info(plugin): + try: + return _aws_connection_info(plugin.get_options()) + except AnsibleBotocoreError as e: + plugin.fail_aws(to_native(e)) + + +def get_aws_region(plugin): + try: + return _aws_region(plugin.get_options()) + except AnsibleBotocoreError as e: + plugin.fail_aws(to_native(e)) diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py new file mode 100644 index 000000000..1e3a16678 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +# (c) 2023 Red Hat Inc. 
diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py
new file mode 100644
index 000000000..1e3a16678
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2023 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.plugins.connection import ConnectionBase
+
+from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase
+
+
+class AWSConnectionBase(AWSPluginBase, ConnectionBase):
+    def _do_fail(self, message):
+        raise AnsibleConnectionFailure(message)
+
+    def __init__(self, *args, boto3_version=None, botocore_version=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)
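AWSConnectionBase simply binds the shared plugin behaviour to Ansible's connection-plugin machinery: failures surface as AnsibleConnectionFailure, and SDK requirements are checked as soon as the connection is constructed. A hypothetical subclass (the transport name and version pins are illustrative, not from this commit) might look like:

# Hypothetical connection plugin skeleton; transport name and pins are made up.
from ansible_collections.amazon.aws.plugins.plugin_utils.connection import AWSConnectionBase


class Connection(AWSConnectionBase):
    transport = "example.aws.demo"

    def __init__(self, *args, **kwargs):
        # The base class checks the installed SDK versions during construction.
        super().__init__(*args, botocore_version="1.29.0", boto3_version="1.26.0", **kwargs)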
diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py b/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py
new file mode 100644
index 000000000..144f77a7a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+try:
+    import boto3
+    import botocore
+except ImportError:
+    pass  # a missing SDK is reported separately, via the plugin's require_aws_sdk() check
+
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.inventory import Cacheable
+from ansible.plugins.inventory import Constructable
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase
+from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError
+
+
+def _boto3_session(profile_name=None):
+    if profile_name is None:
+        return boto3.Session()
+    return boto3.session.Session(profile_name=profile_name)
+
+
+class AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase):
+    class TemplatedOptions:
+        # When one of the TEMPLATABLE_OPTIONS is looked up via get(), any template
+        # in its value is rendered using the templar passed in at construction time.
+        TEMPLATABLE_OPTIONS = (
+            "access_key",
+            "secret_key",
+            "session_token",
+            "profile",
+            "iam_role_name",
+        )
+
+        def __init__(self, templar, options):
+            self.original_options = options
+            self.templar = templar
+
+        def __getitem__(self, *args):
+            return self.original_options.__getitem__(*args)
+
+        def __setitem__(self, *args):
+            return self.original_options.__setitem__(*args)
+
+        def get(self, *args):
+            value = self.original_options.get(*args)
+            if not value:
+                return value
+            if args[0] not in self.TEMPLATABLE_OPTIONS:
+                return value
+            if not self.templar.is_template(value):
+                return value
+
+            return self.templar.template(variable=value, disable_lookups=False)
+
+    def get_options(self, *args):
+        original_options = super().get_options(*args)
+        if not self.templar:
+            return original_options
+        return self.TemplatedOptions(self.templar, original_options)
+
+    def __init__(self):
+        super().__init__()
+        self._frozen_credentials = {}
+
+    # pylint: disable=too-many-arguments
+    def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):
+        super().parse(inventory, loader, path)
+        self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)
+        self._read_config_data(path)
+        self._set_frozen_credentials()
+
+    def client(self, *args, **kwargs):
+        kw_args = dict(self._frozen_credentials)
+        kw_args.update(kwargs)
+        return super().client(*args, **kw_args)
+
+    def resource(self, *args, **kwargs):
+        kw_args = dict(self._frozen_credentials)
+        kw_args.update(kwargs)
+        return super().resource(*args, **kw_args)
+
+    def _freeze_iam_role(self, iam_role_arn):
+        if hasattr(self, "ansible_name"):
+            role_session_name = f"ansible_aws_{self.ansible_name}_dynamic_inventory"
+        else:
+            role_session_name = "ansible_aws_dynamic_inventory"
+        assume_params = {"RoleArn": iam_role_arn, "RoleSessionName": role_session_name}
+
+        try:
+            sts = self.client("sts")
+            assumed_role = sts.assume_role(**assume_params)
+        except AnsibleBotocoreError as e:
+            self.fail_aws(f"Unable to assume role {iam_role_arn}", exception=e)
+
+        credentials = assumed_role.get("Credentials")
+        if not credentials:
+            self.fail_aws(f"Unable to assume role {iam_role_arn}")
+
+        self._frozen_credentials = {
+            "profile_name": None,
+            "aws_access_key_id": credentials.get("AccessKeyId"),
+            "aws_secret_access_key": credentials.get("SecretAccessKey"),
+            "aws_session_token": credentials.get("SessionToken"),
+        }
+
+    def _set_frozen_credentials(self):
+        options = self.get_options()
+        iam_role_arn = options.get("assume_role_arn")
+        if iam_role_arn:
+            self._freeze_iam_role(iam_role_arn)
+
+    def _describe_regions(self, service):
+        # Try pulling a list of regions from the service itself
+        try:
+            initial_region = self.region or "us-east-1"
+            client = self.client(service, region=initial_region)
+            resp = client.describe_regions()
+        except AttributeError:
+            # Not all clients support describe_regions()
+            pass
+        except is_boto3_error_code("UnauthorizedOperation"):
+            self.warn(f"UnauthorizedOperation when trying to list {service} regions")
+        except botocore.exceptions.NoRegionError:
+            self.warn(f"NoRegionError when trying to list {service} regions")
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.warn(f"Unexpected error while trying to list {service} regions: {e}")
+        else:
+            regions = [x["RegionName"] for x in resp.get("Regions", [])]
+            if regions:
+                return regions
+        return None
+
+    def _boto3_regions(self, service):
+        options = self.get_options()
+
+        if options.get("regions"):
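+            # Regions listed explicitly in the inventory configuration take
+            # precedence over the discovery fallbacks that follow.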
+            return options.get("regions")
+
+        # boto3 ships hard-coded lists of available regions for each resource, but these
+        # lists bit-rot. As such we try to query the service, and fall back to ec2 for a
+        # list of regions.
+        for resource_type in list({service, "ec2"}):
+            regions = self._describe_regions(resource_type)
+            if regions:
+                return regions
+
+        # fall back to the local list hard-coded in boto3 if we still have no regions
+        session = _boto3_session(options.get("profile"))
+        regions = session.get_available_regions(service)
+
+        if not regions:
+            # I give up, now you MUST give me regions
+            self.fail_aws(
+                "Unable to get regions list from available methods, you must specify the 'regions' option to continue."
+            )
+
+        return regions
+
+    def all_clients(self, service):
+        """
+        Generator that yields a boto3 client and the region it is connected to.
+
+        :param service: The boto3 service to connect to.
+
+        Note: For services which don't support 'DescribeRegions' this may include bad
+        endpoints, and as such EndpointConnectionError should be cleanly handled as a
+        non-fatal error.
+        """
+        regions = self._boto3_regions(service=service)
+
+        for region in regions:
+            connection = self.client(service, region=region)
+            yield connection, region
+
+    def get_cached_result(self, path, cache):
+        # false when refresh_cache or --flush-cache is used
+        if not cache:
+            return False, None
+        # get the user-specified directive
+        if not self.get_option("cache"):
+            return False, None
+
+        cache_key = self.get_cache_key(path)
+        try:
+            cached_value = self._cache[cache_key]
+        except KeyError:
+            # the cache expired or the cache file doesn't exist
+            return False, None
+
+        return True, cached_value
+
+    def update_cached_result(self, path, cache, result):
+        if not self.get_option("cache"):
+            return
+
+        cache_key = self.get_cache_key(path)
+        # We weren't explicitly told to flush the cache, and there's already a cache entry;
+        # this means that the result we're being passed came from the cache. As such we don't
+        # want to "update" the cache as that could reset a TTL on the cache entry.
+        if cache and cache_key in self._cache:
+            return
+
+        self._cache[cache_key] = result
+
+    def verify_file(self, path):
+        """
+        :param path: the path to the inventory config file
+        :return: True if this plugin can use the config file, False otherwise
+        """
+        if not super().verify_file(path):
+            return False
+
+        if hasattr(self, "INVENTORY_FILE_SUFFIXES"):
+            if not path.endswith(self.INVENTORY_FILE_SUFFIXES):
+                return False
+
+        return True
diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py b/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py
new file mode 100644
index 000000000..635d161d1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2022 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.lookup import LookupBase
+
+from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase
+
+
+class AWSLookupBase(AWSPluginBase, LookupBase):
+    def _do_fail(self, message):
+        raise AnsibleLookupError(message)
+
+    def run(self, terms, variables, botocore_version=None, boto3_version=None, **kwargs):
+        self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)
+        self.set_options(var_options=variables, direct=kwargs)
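As with the connection base, AWSLookupBase only wires the shared behaviour into lookup plugins: failures raise AnsibleLookupError, and run() validates SDK versions and binds options before the concrete lookup does any work. A hypothetical lookup built on it (the service and API call are illustrative assumptions, not from this commit) could look like:

# Hypothetical lookup plugin skeleton built on AWSLookupBase.
from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase


class LookupModule(AWSLookupBase):
    def run(self, terms, variables=None, **kwargs):
        # Let the base class check SDK versions and bind the plugin's options first.
        super().run(terms, variables, **kwargs)
        ssm = self.client("ssm")  # region/credentials are resolved from the plugin options
        return [ssm.get_parameter(Name=term)["Parameter"]["Value"] for term in terms]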